1 /*
2  * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
3  *
4  * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
5  * Copyright (C) 2012 Broadcom Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 #include <linux/bitops.h>
14 #include <linux/bug.h>
15 #include <linux/clk.h>
16 #include <linux/compiler.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/kernel.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/moduleparam.h>
28 #include <linux/platform_device.h>
29 #include <linux/sched.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/timer.h>
33 #include <linux/usb/ch9.h>
34 #include <linux/usb/gadget.h>
35 #include <linux/workqueue.h>
36 
37 #include <bcm63xx_cpu.h>
38 #include <bcm63xx_iudma.h>
39 #include <bcm63xx_dev_usb_usbd.h>
40 #include <bcm63xx_io.h>
41 #include <bcm63xx_regs.h>
42 
43 #define DRV_MODULE_NAME		"bcm63xx_udc"
44 
45 static const char bcm63xx_ep0name[] = "ep0";
46 
47 static const struct {
48 	const char *name;
49 	const struct usb_ep_caps caps;
50 } bcm63xx_ep_info[] = {
51 #define EP_INFO(_name, _caps) \
52 	{ \
53 		.name = _name, \
54 		.caps = _caps, \
55 	}
56 
57 	EP_INFO(bcm63xx_ep0name,
58 		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
59 	EP_INFO("ep1in-bulk",
60 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
61 	EP_INFO("ep2out-bulk",
62 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
63 	EP_INFO("ep3in-int",
64 		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
65 	EP_INFO("ep4out-int",
66 		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
67 
68 #undef EP_INFO
69 };
70 
71 static bool use_fullspeed;
72 module_param(use_fullspeed, bool, S_IRUGO);
73 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
74 
75 /*
76  * RX IRQ coalescing options:
77  *
78  * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
79  * driver is able to pass the "testusb" suite and recover from conditions like:
80  *
81  *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
82  *   2) Host sends 512 bytes of data
83  *   3) Host decides to reconfigure the device and sends SET_INTERFACE
84  *   4) Device shuts down the endpoint and cancels the RX transaction
85  *
86  * true - one IRQ per transfer, for transfers <= 2048B.  Generates
87  * considerably fewer IRQs, but error recovery is less robust.  Does not
88  * reliably pass "testusb".
89  *
90  * TX always uses coalescing, because we can cancel partially complete TX
91  * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
92  * this on RX.
93  */
94 static bool irq_coalesce;
95 module_param(irq_coalesce, bool, S_IRUGO);
96 MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
97 
98 #define BCM63XX_NUM_EP			5
99 #define BCM63XX_NUM_IUDMA		6
100 #define BCM63XX_NUM_FIFO_PAIRS		3
101 
102 #define IUDMA_RESET_TIMEOUT_US		10000
103 
104 #define IUDMA_EP0_RXCHAN		0
105 #define IUDMA_EP0_TXCHAN		1
106 
107 #define IUDMA_MAX_FRAGMENT		2048
108 #define BCM63XX_MAX_CTRL_PKT		64
109 
110 #define BCMEP_CTRL			0x00
111 #define BCMEP_ISOC			0x01
112 #define BCMEP_BULK			0x02
113 #define BCMEP_INTR			0x03
114 
115 #define BCMEP_OUT			0x00
116 #define BCMEP_IN			0x01
117 
118 #define BCM63XX_SPD_FULL		1
119 #define BCM63XX_SPD_HIGH		0
120 
121 #define IUDMA_DMAC_OFFSET		0x200
122 #define IUDMA_DMAS_OFFSET		0x400
123 
124 enum bcm63xx_ep0_state {
125 	EP0_REQUEUE,
126 	EP0_IDLE,
127 	EP0_IN_DATA_PHASE_SETUP,
128 	EP0_IN_DATA_PHASE_COMPLETE,
129 	EP0_OUT_DATA_PHASE_SETUP,
130 	EP0_OUT_DATA_PHASE_COMPLETE,
131 	EP0_OUT_STATUS_PHASE,
132 	EP0_IN_FAKE_STATUS_PHASE,
133 	EP0_SHUTDOWN,
134 };
135 
136 static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
137 	"REQUEUE",
138 	"IDLE",
139 	"IN_DATA_PHASE_SETUP",
140 	"IN_DATA_PHASE_COMPLETE",
141 	"OUT_DATA_PHASE_SETUP",
142 	"OUT_DATA_PHASE_COMPLETE",
143 	"OUT_STATUS_PHASE",
144 	"IN_FAKE_STATUS_PHASE",
145 	"SHUTDOWN",
146 };
147 
148 /**
149  * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
150  * @ep_num: USB endpoint number.
151  * @n_bds: Number of buffer descriptors in the ring.
152  * @ep_type: Endpoint type (control, bulk, interrupt).
153  * @dir: Direction (in, out).
154  * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
155  * @max_pkt_hs: Maximum packet size in high speed mode.
156  * @max_pkt_fs: Maximum packet size in full speed mode.
157  */
158 struct iudma_ch_cfg {
159 	int				ep_num;
160 	int				n_bds;
161 	int				ep_type;
162 	int				dir;
163 	int				n_fifo_slots;
164 	int				max_pkt_hs;
165 	int				max_pkt_fs;
166 };
167 
168 static const struct iudma_ch_cfg iudma_defaults[] = {
169 
170 	/* This controller was designed to support a CDC/RNDIS application.
171 	   It may be possible to reconfigure some of the endpoints, but
172 	   the hardware limitations (FIFO sizing and number of DMA channels)
173 	   may significantly impact flexibility and/or stability.  Change
174 	   these values at your own risk.
175 
176 	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
177 	idx      |  n_bds     |         dir       |  max_pkt_hs  |
178 	 |       |    |       |          |        |      |       |       */
179 	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
180 	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
181 	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
182 	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
183 	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
184 	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
185 };
186 
187 struct bcm63xx_udc;
188 
189 /**
190  * struct iudma_ch - Represents the current state of a single IUDMA channel.
191  * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
192  * @ep_num: USB endpoint number.  -1 for ep0 RX.
193  * @enabled: Whether bcm63xx_ep_enable() has been called.
194  * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
195  * @is_tx: true for TX, false for RX.
196  * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
197  * @udc: Reference to the device controller.
198  * @read_bd: Next buffer descriptor to reap from the hardware.
199  * @write_bd: Next BD available for a new packet.
200  * @end_bd: Points to the final BD in the ring.
201  * @n_bds_used: Number of BD entries currently occupied.
202  * @bd_ring: Base pointer to the BD ring.
203  * @bd_ring_dma: Physical (DMA) address of bd_ring.
204  * @n_bds: Total number of BDs in the ring.
205  *
206  * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
207  * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
208  * only.
209  *
210  * Each bulk/intr endpoint has a single IUDMA channel and a single
211  * struct usb_ep.
212  */
213 struct iudma_ch {
214 	unsigned int			ch_idx;
215 	int				ep_num;
216 	bool				enabled;
217 	int				max_pkt;
218 	bool				is_tx;
219 	struct bcm63xx_ep		*bep;
220 	struct bcm63xx_udc		*udc;
221 
222 	struct bcm_enet_desc		*read_bd;
223 	struct bcm_enet_desc		*write_bd;
224 	struct bcm_enet_desc		*end_bd;
225 	int				n_bds_used;
226 
227 	struct bcm_enet_desc		*bd_ring;
228 	dma_addr_t			bd_ring_dma;
229 	unsigned int			n_bds;
230 };
231 
232 /**
233  * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
234  * @ep_num: USB endpoint number.
235  * @iudma: Pointer to IUDMA channel state.
236  * @ep: USB gadget layer representation of the EP.
237  * @udc: Reference to the device controller.
238  * @queue: Linked list of outstanding requests for this EP.
239  * @halted: 1 if the EP is stalled; 0 otherwise.
240  */
241 struct bcm63xx_ep {
242 	unsigned int			ep_num;
243 	struct iudma_ch			*iudma;
244 	struct usb_ep			ep;
245 	struct bcm63xx_udc		*udc;
246 	struct list_head		queue;
247 	unsigned			halted:1;
248 };
249 
250 /**
251  * struct bcm63xx_req - Internal (driver) state of a single request.
252  * @queue: Links back to the EP's request list.
253  * @req: USB gadget layer representation of the request.
254  * @offset: Current byte offset into the data buffer (next byte to queue).
255  * @bd_bytes: Number of data bytes in outstanding BD entries.
256  * @iudma: IUDMA channel used for the request.
257  */
258 struct bcm63xx_req {
259 	struct list_head		queue;		/* ep's requests */
260 	struct usb_request		req;
261 	unsigned int			offset;
262 	unsigned int			bd_bytes;
263 	struct iudma_ch			*iudma;
264 };
265 
266 /**
267  * struct bcm63xx_udc - Driver/hardware private context.
268  * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
269  * @dev: Generic Linux device structure.
270  * @pd: Platform data (board/port info).
271  * @usbd_clk: Clock descriptor for the USB device block.
272  * @usbh_clk: Clock descriptor for the USB host block.
273  * @gadget: USB slave device.
274  * @driver: Driver for USB slave devices.
275  * @usbd_regs: Base address of the USBD/USB20D block.
276  * @iudma_regs: Base address of the USBD's associated IUDMA block.
277  * @bep: Array of endpoints, including ep0.
278  * @iudma: Array of all IUDMA channels used by this controller.
279  * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
280  * @iface: USB interface number, from SET_INTERFACE wIndex.
281  * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
282  * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
283  * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
284  * @ep0state: Current state of the ep0 state machine.
285  * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
286  * @wedgemap: Bitmap of wedged endpoints.
287  * @ep0_req_reset: USB reset is pending.
288  * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
289  * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
290  * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
291  * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
292  * @ep0_reply: Pending reply from gadget driver.
293  * @ep0_request: Outstanding ep0 request.
294  * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
295  * @debugfs_usbd: debugfs file "usbd" for controller state.
296  * @debugfs_iudma: debugfs file "usbd" for IUDMA state.
297  */
298 struct bcm63xx_udc {
299 	spinlock_t			lock;
300 
301 	struct device			*dev;
302 	struct bcm63xx_usbd_platform_data *pd;
303 	struct clk			*usbd_clk;
304 	struct clk			*usbh_clk;
305 
306 	struct usb_gadget		gadget;
307 	struct usb_gadget_driver	*driver;
308 
309 	void __iomem			*usbd_regs;
310 	void __iomem			*iudma_regs;
311 
312 	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
313 	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];
314 
315 	int				cfg;
316 	int				iface;
317 	int				alt_iface;
318 
319 	struct bcm63xx_req		ep0_ctrl_req;
320 	u8				*ep0_ctrl_buf;
321 
322 	int				ep0state;
323 	struct work_struct		ep0_wq;
324 
325 	unsigned long			wedgemap;
326 
327 	unsigned			ep0_req_reset:1;
328 	unsigned			ep0_req_set_cfg:1;
329 	unsigned			ep0_req_set_iface:1;
330 	unsigned			ep0_req_shutdown:1;
331 
332 	unsigned			ep0_req_completed:1;
333 	struct usb_request		*ep0_reply;
334 	struct usb_request		*ep0_request;
335 
336 	struct dentry			*debugfs_root;
337 	struct dentry			*debugfs_usbd;
338 	struct dentry			*debugfs_iudma;
339 };
340 
341 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
342 
343 /***********************************************************************
344  * Convenience functions
345  ***********************************************************************/
346 
347 static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
348 {
349 	return container_of(g, struct bcm63xx_udc, gadget);
350 }
351 
352 static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
353 {
354 	return container_of(ep, struct bcm63xx_ep, ep);
355 }
356 
357 static inline struct bcm63xx_req *our_req(struct usb_request *req)
358 {
359 	return container_of(req, struct bcm63xx_req, req);
360 }
361 
362 static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
363 {
364 	return bcm_readl(udc->usbd_regs + off);
365 }
366 
367 static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
368 {
369 	bcm_writel(val, udc->usbd_regs + off);
370 }
371 
372 static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
373 {
374 	return bcm_readl(udc->iudma_regs + off);
375 }
376 
377 static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
378 {
379 	bcm_writel(val, udc->iudma_regs + off);
380 }
381 
382 static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
383 {
384 	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
385 			(ENETDMA_CHAN_WIDTH * chan));
386 }
387 
388 static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
389 					int chan)
390 {
391 	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
392 			(ENETDMA_CHAN_WIDTH * chan));
393 }
394 
395 static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
396 {
397 	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
398 			(ENETDMA_CHAN_WIDTH * chan));
399 }
400 
401 static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
402 					int chan)
403 {
404 	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
405 			(ENETDMA_CHAN_WIDTH * chan));
406 }
407 
408 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
409 {
410 	if (is_enabled) {
411 		clk_enable(udc->usbh_clk);
412 		clk_enable(udc->usbd_clk);
413 		udelay(10);
414 	} else {
415 		clk_disable(udc->usbd_clk);
416 		clk_disable(udc->usbh_clk);
417 	}
418 }
419 
420 /***********************************************************************
421  * Low-level IUDMA / FIFO operations
422  ***********************************************************************/
423 
424 /**
425  * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
426  * @udc: Reference to the device controller.
427  * @idx: Desired init_sel value.
428  *
429  * The "init_sel" signal is used as a selection index for both endpoints
430  * and IUDMA channels.  Since these do not map 1:1, the use of this signal
431  * depends on the context.
432  */
433 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
434 {
435 	u32 val = usbd_readl(udc, USBD_CONTROL_REG);
436 
437 	val &= ~USBD_CONTROL_INIT_SEL_MASK;
438 	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
439 	usbd_writel(udc, val, USBD_CONTROL_REG);
440 }
441 
442 /**
443  * bcm63xx_set_stall - Enable/disable stall on one endpoint.
444  * @udc: Reference to the device controller.
445  * @bep: Endpoint on which to operate.
446  * @is_stalled: true to enable stall, false to disable.
447  *
448  * See notes in bcm63xx_update_wedge() regarding automatic clearing of
449  * halt/stall conditions.
450  */
451 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
452 	bool is_stalled)
453 {
454 	u32 val;
455 
456 	val = USBD_STALL_UPDATE_MASK |
457 		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
458 		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
459 	usbd_writel(udc, val, USBD_STALL_REG);
460 }
461 
462 /**
463  * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
464  * @udc: Reference to the device controller.
465  *
466  * These parameters depend on the USB link speed.  Settings are
467  * per-IUDMA-channel-pair.
468  */
469 static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
470 {
471 	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
472 	u32 i, val, rx_fifo_slot, tx_fifo_slot;
473 
474 	/* set up FIFO boundaries and packet sizes; this is done in pairs */
475 	rx_fifo_slot = tx_fifo_slot = 0;
476 	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
477 		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
478 		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
479 
480 		bcm63xx_ep_dma_select(udc, i >> 1);
481 
482 		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
483 			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
484 			 USBD_RXFIFO_CONFIG_END_SHIFT);
485 		rx_fifo_slot += rx_cfg->n_fifo_slots;
486 		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
487 		usbd_writel(udc,
488 			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
489 			    USBD_RXFIFO_EPSIZE_REG);
490 
491 		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
492 			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
493 			 USBD_TXFIFO_CONFIG_END_SHIFT);
494 		tx_fifo_slot += tx_cfg->n_fifo_slots;
495 		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
496 		usbd_writel(udc,
497 			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
498 			    USBD_TXFIFO_EPSIZE_REG);
499 
500 		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
501 	}
502 }
503 
504 /**
505  * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
506  * @udc: Reference to the device controller.
507  * @ep_num: Endpoint number.
508  */
509 static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
510 {
511 	u32 val;
512 
513 	bcm63xx_ep_dma_select(udc, ep_num);
514 
515 	val = usbd_readl(udc, USBD_CONTROL_REG);
516 	val |= USBD_CONTROL_FIFO_RESET_MASK;
517 	usbd_writel(udc, val, USBD_CONTROL_REG);
518 	usbd_readl(udc, USBD_CONTROL_REG);
519 }
520 
521 /**
522  * bcm63xx_fifo_reset - Flush all hardware FIFOs.
523  * @udc: Reference to the device controller.
524  */
525 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
526 {
527 	int i;
528 
529 	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
530 		bcm63xx_fifo_reset_ep(udc, i);
531 }
532 
533 /**
534  * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
535  * @udc: Reference to the device controller.
536  */
537 static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
538 {
539 	u32 i, val;
540 
541 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
542 		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
543 
544 		if (cfg->ep_num < 0)
545 			continue;
546 
547 		bcm63xx_ep_dma_select(udc, cfg->ep_num);
548 		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
549 			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
550 		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
551 	}
552 }
553 
554 /**
555  * bcm63xx_ep_setup - Configure per-endpoint settings.
556  * @udc: Reference to the device controller.
557  *
558  * This needs to be rerun if the speed/cfg/intf/altintf changes.
559  */
560 static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
561 {
562 	u32 val, i;
563 
564 	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
565 
566 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
567 		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
568 		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
569 			      cfg->max_pkt_hs : cfg->max_pkt_fs;
570 		int idx = cfg->ep_num;
571 
572 		udc->iudma[i].max_pkt = max_pkt;
573 
574 		if (idx < 0)
575 			continue;
576 		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
577 
578 		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
579 		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
580 		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
581 		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
582 		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
583 		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
584 		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
585 		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
586 	}
587 }
588 
589 /**
590  * iudma_write - Queue a single IUDMA transaction.
591  * @udc: Reference to the device controller.
592  * @iudma: IUDMA channel to use.
593  * @breq: Request containing the transaction data.
594  *
595  * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
596  * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
597  * So iudma_write() may be called several times to fulfill a single
598  * usb_request.
599  *
600  * For TX IUDMA, this can queue multiple buffer descriptors if needed.
601  */
602 static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
603 	struct bcm63xx_req *breq)
604 {
605 	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
606 	unsigned int bytes_left = breq->req.length - breq->offset;
607 	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
608 		iudma->max_pkt : IUDMA_MAX_FRAGMENT;
609 
610 	iudma->n_bds_used = 0;
611 	breq->bd_bytes = 0;
612 	breq->iudma = iudma;
613 
614 	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
615 		extra_zero_pkt = 1;
616 
617 	do {
618 		struct bcm_enet_desc *d = iudma->write_bd;
619 		u32 dmaflags = 0;
620 		unsigned int n_bytes;
621 
622 		if (d == iudma->end_bd) {
623 			dmaflags |= DMADESC_WRAP_MASK;
624 			iudma->write_bd = iudma->bd_ring;
625 		} else {
626 			iudma->write_bd++;
627 		}
628 		iudma->n_bds_used++;
629 
630 		n_bytes = min_t(int, bytes_left, max_bd_bytes);
631 		if (n_bytes)
632 			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
633 		else
634 			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
635 				    DMADESC_USB_ZERO_MASK;
636 
637 		dmaflags |= DMADESC_OWNER_MASK;
638 		if (first_bd) {
639 			dmaflags |= DMADESC_SOP_MASK;
640 			first_bd = 0;
641 		}
642 
643 		/*
644 		 * extra_zero_pkt forces one more iteration through the loop
645 		 * after all data is queued up, to send the zero packet
646 		 */
647 		if (extra_zero_pkt && !bytes_left)
648 			extra_zero_pkt = 0;
649 
650 		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
651 		    (n_bytes == bytes_left && !extra_zero_pkt)) {
652 			last_bd = 1;
653 			dmaflags |= DMADESC_EOP_MASK;
654 		}
655 
656 		d->address = breq->req.dma + breq->offset;
657 		mb();
658 		d->len_stat = dmaflags;
659 
660 		breq->offset += n_bytes;
661 		breq->bd_bytes += n_bytes;
662 		bytes_left -= n_bytes;
663 	} while (!last_bd);
664 
665 	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
666 			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
667 }
668 
669 /**
670  * iudma_read - Check for IUDMA buffer completion.
671  * @udc: Reference to the device controller.
672  * @iudma: IUDMA channel to use.
673  *
674  * This checks to see if ALL of the outstanding BDs on the DMA channel
675  * have been filled.  If so, it returns the actual transfer length;
676  * otherwise it returns -EBUSY.
677  */
678 static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
679 {
680 	int i, actual_len = 0;
681 	struct bcm_enet_desc *d = iudma->read_bd;
682 
683 	if (!iudma->n_bds_used)
684 		return -EINVAL;
685 
686 	for (i = 0; i < iudma->n_bds_used; i++) {
687 		u32 dmaflags;
688 
689 		dmaflags = d->len_stat;
690 
691 		if (dmaflags & DMADESC_OWNER_MASK)
692 			return -EBUSY;
693 
694 		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
695 			      DMADESC_LENGTH_SHIFT;
696 		if (d == iudma->end_bd)
697 			d = iudma->bd_ring;
698 		else
699 			d++;
700 	}
701 
702 	iudma->read_bd = d;
703 	iudma->n_bds_used = 0;
704 	return actual_len;
705 }
706 
707 /**
708  * iudma_reset_channel - Stop DMA on a single channel.
709  * @udc: Reference to the device controller.
710  * @iudma: IUDMA channel to reset.
711  */
712 static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
713 {
714 	int timeout = IUDMA_RESET_TIMEOUT_US;
715 	struct bcm_enet_desc *d;
716 	int ch_idx = iudma->ch_idx;
717 
718 	if (!iudma->is_tx)
719 		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
720 
721 	/* stop DMA, then wait for the hardware to wrap up */
722 	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
723 
724 	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
725 				   ENETDMAC_CHANCFG_EN_MASK) {
726 		udelay(1);
727 
728 		/* repeatedly flush the FIFO data until the BD completes */
729 		if (iudma->is_tx && iudma->ep_num >= 0)
730 			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
731 
732 		if (!timeout--) {
733 			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
734 				ch_idx);
735 			break;
736 		}
737 		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
738 			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
739 				 ch_idx);
740 			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
741 					ENETDMAC_CHANCFG_REG, ch_idx);
742 		}
743 	}
744 	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
745 
746 	/* don't leave "live" HW-owned entries for the next guy to step on */
747 	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
748 		d->len_stat = 0;
749 	mb();
750 
751 	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
752 	iudma->n_bds_used = 0;
753 
754 	/* set up IRQs, UBUS burst size, and BD base for this channel */
755 	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
756 			ENETDMAC_IRMASK_REG, ch_idx);
757 	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
758 
759 	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
760 	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
761 }
762 
763 /**
764  * iudma_init_channel - One-time IUDMA channel initialization.
765  * @udc: Reference to the device controller.
766  * @ch_idx: Channel to initialize.
767  */
768 static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
769 {
770 	struct iudma_ch *iudma = &udc->iudma[ch_idx];
771 	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
772 	unsigned int n_bds = cfg->n_bds;
773 	struct bcm63xx_ep *bep = NULL;
774 
775 	iudma->ep_num = cfg->ep_num;
776 	iudma->ch_idx = ch_idx;
777 	iudma->is_tx = !!(ch_idx & 0x01);
778 	if (iudma->ep_num >= 0) {
779 		bep = &udc->bep[iudma->ep_num];
780 		bep->iudma = iudma;
781 		INIT_LIST_HEAD(&bep->queue);
782 	}
783 
784 	iudma->bep = bep;
785 	iudma->udc = udc;
786 
787 	/* ep0 is always active; others are controlled by the gadget driver */
788 	if (iudma->ep_num <= 0)
789 		iudma->enabled = true;
790 
791 	iudma->n_bds = n_bds;
792 	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
793 		n_bds * sizeof(struct bcm_enet_desc),
794 		&iudma->bd_ring_dma, GFP_KERNEL);
795 	if (!iudma->bd_ring)
796 		return -ENOMEM;
797 	iudma->end_bd = &iudma->bd_ring[n_bds - 1];
798 
799 	return 0;
800 }
801 
802 /**
803  * iudma_init - One-time initialization of all IUDMA channels.
804  * @udc: Reference to the device controller.
805  *
806  * Enable DMA, flush channels, and enable global IUDMA IRQs.
807  */
808 static int iudma_init(struct bcm63xx_udc *udc)
809 {
810 	int i, rc;
811 
812 	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
813 
814 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
815 		rc = iudma_init_channel(udc, i);
816 		if (rc)
817 			return rc;
818 		iudma_reset_channel(udc, &udc->iudma[i]);
819 	}
820 
821 	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
822 	return 0;
823 }
824 
825 /**
826  * iudma_uninit - Uninitialize IUDMA channels.
827  * @udc: Reference to the device controller.
828  *
829  * Kill global IUDMA IRQs, flush channels, and kill DMA.
830  */
831 static void iudma_uninit(struct bcm63xx_udc *udc)
832 {
833 	int i;
834 
835 	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
836 
837 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
838 		iudma_reset_channel(udc, &udc->iudma[i]);
839 
840 	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
841 }
842 
843 /***********************************************************************
844  * Other low-level USBD operations
845  ***********************************************************************/
846 
847 /**
848  * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
849  * @udc: Reference to the device controller.
850  * @enable_irqs: true to enable, false to disable.
851  */
852 static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
853 {
854 	u32 val;
855 
856 	usbd_writel(udc, 0, USBD_STATUS_REG);
857 
858 	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
859 	      BIT(USBD_EVENT_IRQ_SETUP) |
860 	      BIT(USBD_EVENT_IRQ_SETCFG) |
861 	      BIT(USBD_EVENT_IRQ_SETINTF) |
862 	      BIT(USBD_EVENT_IRQ_USB_LINK);
863 	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
864 	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
865 }
866 
867 /**
868  * bcm63xx_select_phy_mode - Select between USB device and host mode.
869  * @udc: Reference to the device controller.
870  * @is_device: true for device, false for host.
871  *
872  * This should probably be reworked to use the drivers/usb/otg
873  * infrastructure.
874  *
875  * By default, the AFE/pullups are disabled in device mode, until
876  * bcm63xx_select_pullup() is called.
877  */
878 static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
879 {
880 	u32 val, portmask = BIT(udc->pd->port_no);
881 
882 	if (BCMCPU_IS_6328()) {
883 		/* configure pinmux to sense VBUS signal */
884 		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
885 		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
886 		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
887 			       GPIO_PINMUX_OTHR_6328_USB_HOST;
888 		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
889 	}
890 
891 	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
892 	if (is_device) {
893 		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
894 		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
895 	} else {
896 		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
897 		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
898 	}
899 	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
900 
901 	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
902 	if (is_device)
903 		val |= USBH_PRIV_SWAP_USBD_MASK;
904 	else
905 		val &= ~USBH_PRIV_SWAP_USBD_MASK;
906 	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
907 }
908 
909 /**
910  * bcm63xx_select_pullup - Enable/disable the pullup on D+
911  * @udc: Reference to the device controller.
912  * @is_on: true to enable the pullup, false to disable.
913  *
914  * If the pullup is active, the host will sense a FS/HS device connected to
915  * the port.  If the pullup is inactive, the host will think the USB
916  * device has been disconnected.
917  */
918 static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
919 {
920 	u32 val, portmask = BIT(udc->pd->port_no);
921 
922 	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
923 	if (is_on)
924 		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
925 	else
926 		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
927 	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
928 }
929 
930 /**
931  * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
932  * @udc: Reference to the device controller.
933  *
934  * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
935  * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
936  */
937 static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
938 {
939 	set_clocks(udc, true);
940 	iudma_uninit(udc);
941 	set_clocks(udc, false);
942 
943 	clk_put(udc->usbd_clk);
944 	clk_put(udc->usbh_clk);
945 }
946 
947 /**
948  * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
949  * @udc: Reference to the device controller.
950  */
951 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
952 {
953 	int i, rc = 0;
954 	u32 val;
955 
956 	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
957 					 GFP_KERNEL);
958 	if (!udc->ep0_ctrl_buf)
959 		return -ENOMEM;
960 
961 	INIT_LIST_HEAD(&udc->gadget.ep_list);
962 	for (i = 0; i < BCM63XX_NUM_EP; i++) {
963 		struct bcm63xx_ep *bep = &udc->bep[i];
964 
965 		bep->ep.name = bcm63xx_ep_info[i].name;
966 		bep->ep.caps = bcm63xx_ep_info[i].caps;
967 		bep->ep_num = i;
968 		bep->ep.ops = &bcm63xx_udc_ep_ops;
969 		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
970 		bep->halted = 0;
971 		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
972 		bep->udc = udc;
973 		bep->ep.desc = NULL;
974 		INIT_LIST_HEAD(&bep->queue);
975 	}
976 
977 	udc->gadget.ep0 = &udc->bep[0].ep;
978 	list_del(&udc->bep[0].ep.ep_list);
979 
980 	udc->gadget.speed = USB_SPEED_UNKNOWN;
981 	udc->ep0state = EP0_SHUTDOWN;
982 
983 	udc->usbh_clk = clk_get(udc->dev, "usbh");
984 	if (IS_ERR(udc->usbh_clk))
985 		return -EIO;
986 
987 	udc->usbd_clk = clk_get(udc->dev, "usbd");
988 	if (IS_ERR(udc->usbd_clk)) {
989 		clk_put(udc->usbh_clk);
990 		return -EIO;
991 	}
992 
993 	set_clocks(udc, true);
994 
995 	val = USBD_CONTROL_AUTO_CSRS_MASK |
996 	      USBD_CONTROL_DONE_CSRS_MASK |
997 	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
998 	usbd_writel(udc, val, USBD_CONTROL_REG);
999 
1000 	val = USBD_STRAPS_APP_SELF_PWR_MASK |
1001 	      USBD_STRAPS_APP_RAM_IF_MASK |
1002 	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
1003 	      USBD_STRAPS_APP_8BITPHY_MASK |
1004 	      USBD_STRAPS_APP_RMTWKUP_MASK;
1005 
1006 	if (udc->gadget.max_speed == USB_SPEED_HIGH)
1007 		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1008 	else
1009 		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1010 	usbd_writel(udc, val, USBD_STRAPS_REG);
1011 
1012 	bcm63xx_set_ctrl_irqs(udc, false);
1013 
1014 	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1015 
1016 	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1017 	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1018 	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1019 
1020 	rc = iudma_init(udc);
1021 	set_clocks(udc, false);
1022 	if (rc)
1023 		bcm63xx_uninit_udc_hw(udc);
1024 
1025 	return 0;
1026 }
1027 
1028 /***********************************************************************
1029  * Standard EP gadget operations
1030  ***********************************************************************/
1031 
1032 /**
1033  * bcm63xx_ep_enable - Enable one endpoint.
1034  * @ep: Endpoint to enable.
1035  * @desc: Contains max packet, direction, etc.
1036  *
1037  * Most of the endpoint parameters are fixed in this controller, so there
1038  * isn't much for this function to do.
1039  */
1040 static int bcm63xx_ep_enable(struct usb_ep *ep,
1041 	const struct usb_endpoint_descriptor *desc)
1042 {
1043 	struct bcm63xx_ep *bep = our_ep(ep);
1044 	struct bcm63xx_udc *udc = bep->udc;
1045 	struct iudma_ch *iudma = bep->iudma;
1046 	unsigned long flags;
1047 
1048 	if (!ep || !desc || ep->name == bcm63xx_ep0name)
1049 		return -EINVAL;
1050 
1051 	if (!udc->driver)
1052 		return -ESHUTDOWN;
1053 
1054 	spin_lock_irqsave(&udc->lock, flags);
1055 	if (iudma->enabled) {
1056 		spin_unlock_irqrestore(&udc->lock, flags);
1057 		return -EINVAL;
1058 	}
1059 
1060 	iudma->enabled = true;
1061 	BUG_ON(!list_empty(&bep->queue));
1062 
1063 	iudma_reset_channel(udc, iudma);
1064 
1065 	bep->halted = 0;
1066 	bcm63xx_set_stall(udc, bep, false);
1067 	clear_bit(bep->ep_num, &udc->wedgemap);
1068 
1069 	ep->desc = desc;
1070 	ep->maxpacket = usb_endpoint_maxp(desc);
1071 
1072 	spin_unlock_irqrestore(&udc->lock, flags);
1073 	return 0;
1074 }
1075 
1076 /**
1077  * bcm63xx_ep_disable - Disable one endpoint.
1078  * @ep: Endpoint to disable.
1079  */
1080 static int bcm63xx_ep_disable(struct usb_ep *ep)
1081 {
1082 	struct bcm63xx_ep *bep = our_ep(ep);
1083 	struct bcm63xx_udc *udc = bep->udc;
1084 	struct iudma_ch *iudma = bep->iudma;
1085 	struct bcm63xx_req *breq, *n;
1086 	unsigned long flags;
1087 
1088 	if (!ep || !ep->desc)
1089 		return -EINVAL;
1090 
1091 	spin_lock_irqsave(&udc->lock, flags);
1092 	if (!iudma->enabled) {
1093 		spin_unlock_irqrestore(&udc->lock, flags);
1094 		return -EINVAL;
1095 	}
1096 	iudma->enabled = false;
1097 
1098 	iudma_reset_channel(udc, iudma);
1099 
1100 	if (!list_empty(&bep->queue)) {
1101 		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1102 			usb_gadget_unmap_request(&udc->gadget, &breq->req,
1103 						 iudma->is_tx);
1104 			list_del(&breq->queue);
1105 			breq->req.status = -ESHUTDOWN;
1106 
1107 			spin_unlock_irqrestore(&udc->lock, flags);
1108 			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1109 			spin_lock_irqsave(&udc->lock, flags);
1110 		}
1111 	}
1112 	ep->desc = NULL;
1113 
1114 	spin_unlock_irqrestore(&udc->lock, flags);
1115 	return 0;
1116 }
1117 
1118 /**
1119  * bcm63xx_udc_alloc_request - Allocate a new request.
1120  * @ep: Endpoint associated with the request.
1121  * @mem_flags: Flags to pass to kzalloc().
1122  */
1123 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1124 	gfp_t mem_flags)
1125 {
1126 	struct bcm63xx_req *breq;
1127 
1128 	breq = kzalloc(sizeof(*breq), mem_flags);
1129 	if (!breq)
1130 		return NULL;
1131 	return &breq->req;
1132 }
1133 
1134 /**
1135  * bcm63xx_udc_free_request - Free a request.
1136  * @ep: Endpoint associated with the request.
1137  * @req: Request to free.
1138  */
1139 static void bcm63xx_udc_free_request(struct usb_ep *ep,
1140 	struct usb_request *req)
1141 {
1142 	struct bcm63xx_req *breq = our_req(req);
1143 	kfree(breq);
1144 }
1145 
1146 /**
1147  * bcm63xx_udc_queue - Queue up a new request.
1148  * @ep: Endpoint associated with the request.
1149  * @req: Request to add.
1150  * @mem_flags: Unused.
1151  *
1152  * If the queue is empty, start this request immediately.  Otherwise, add
1153  * it to the list.
1154  *
1155  * ep0 replies are sent through this function from the gadget driver, but
1156  * they are treated differently because they need to be handled by the ep0
1157  * state machine.  (Sometimes they are replies to control requests that
1158  * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1159  */
1160 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1161 	gfp_t mem_flags)
1162 {
1163 	struct bcm63xx_ep *bep = our_ep(ep);
1164 	struct bcm63xx_udc *udc = bep->udc;
1165 	struct bcm63xx_req *breq = our_req(req);
1166 	unsigned long flags;
1167 	int rc = 0;
1168 
1169 	if (unlikely(!req || !req->complete || !req->buf || !ep))
1170 		return -EINVAL;
1171 
1172 	req->actual = 0;
1173 	req->status = 0;
1174 	breq->offset = 0;
1175 
1176 	if (bep == &udc->bep[0]) {
1177 		/* only one reply per request, please */
1178 		if (udc->ep0_reply)
1179 			return -EINVAL;
1180 
1181 		udc->ep0_reply = req;
1182 		schedule_work(&udc->ep0_wq);
1183 		return 0;
1184 	}
1185 
1186 	spin_lock_irqsave(&udc->lock, flags);
1187 	if (!bep->iudma->enabled) {
1188 		rc = -ESHUTDOWN;
1189 		goto out;
1190 	}
1191 
1192 	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1193 	if (rc == 0) {
1194 		list_add_tail(&breq->queue, &bep->queue);
1195 		if (list_is_singular(&bep->queue))
1196 			iudma_write(udc, bep->iudma, breq);
1197 	}
1198 
1199 out:
1200 	spin_unlock_irqrestore(&udc->lock, flags);
1201 	return rc;
1202 }
1203 
1204 /**
1205  * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1206  * @ep: Endpoint associated with the request.
1207  * @req: Request to remove.
1208  *
1209  * If the request is not at the head of the queue, this is easy - just nuke
1210  * it.  If the request is at the head of the queue, we'll need to stop the
1211  * DMA transaction and then queue up the successor.
1212  */
1213 static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1214 {
1215 	struct bcm63xx_ep *bep = our_ep(ep);
1216 	struct bcm63xx_udc *udc = bep->udc;
1217 	struct bcm63xx_req *breq = our_req(req), *cur;
1218 	unsigned long flags;
1219 	int rc = 0;
1220 
1221 	spin_lock_irqsave(&udc->lock, flags);
1222 	if (list_empty(&bep->queue)) {
1223 		rc = -EINVAL;
1224 		goto out;
1225 	}
1226 
1227 	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1228 	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1229 
1230 	if (breq == cur) {
1231 		iudma_reset_channel(udc, bep->iudma);
1232 		list_del(&breq->queue);
1233 
1234 		if (!list_empty(&bep->queue)) {
1235 			struct bcm63xx_req *next;
1236 
1237 			next = list_first_entry(&bep->queue,
1238 				struct bcm63xx_req, queue);
1239 			iudma_write(udc, bep->iudma, next);
1240 		}
1241 	} else {
1242 		list_del(&breq->queue);
1243 	}
1244 
1245 out:
1246 	spin_unlock_irqrestore(&udc->lock, flags);
1247 
1248 	req->status = -ESHUTDOWN;
1249 	req->complete(ep, req);
1250 
1251 	return rc;
1252 }
1253 
1254 /**
1255  * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1256  * @ep: Endpoint to halt.
1257  * @value: Zero to clear halt; nonzero to set halt.
1258  *
1259  * See comments in bcm63xx_update_wedge().
1260  */
1261 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1262 {
1263 	struct bcm63xx_ep *bep = our_ep(ep);
1264 	struct bcm63xx_udc *udc = bep->udc;
1265 	unsigned long flags;
1266 
1267 	spin_lock_irqsave(&udc->lock, flags);
1268 	bcm63xx_set_stall(udc, bep, !!value);
1269 	bep->halted = value;
1270 	spin_unlock_irqrestore(&udc->lock, flags);
1271 
1272 	return 0;
1273 }
1274 
1275 /**
1276  * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1277  * @ep: Endpoint to wedge.
1278  *
1279  * See comments in bcm63xx_update_wedge().
1280  */
1281 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1282 {
1283 	struct bcm63xx_ep *bep = our_ep(ep);
1284 	struct bcm63xx_udc *udc = bep->udc;
1285 	unsigned long flags;
1286 
1287 	spin_lock_irqsave(&udc->lock, flags);
1288 	set_bit(bep->ep_num, &udc->wedgemap);
1289 	bcm63xx_set_stall(udc, bep, true);
1290 	spin_unlock_irqrestore(&udc->lock, flags);
1291 
1292 	return 0;
1293 }
1294 
1295 static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1296 	.enable		= bcm63xx_ep_enable,
1297 	.disable	= bcm63xx_ep_disable,
1298 
1299 	.alloc_request	= bcm63xx_udc_alloc_request,
1300 	.free_request	= bcm63xx_udc_free_request,
1301 
1302 	.queue		= bcm63xx_udc_queue,
1303 	.dequeue	= bcm63xx_udc_dequeue,
1304 
1305 	.set_halt	= bcm63xx_udc_set_halt,
1306 	.set_wedge	= bcm63xx_udc_set_wedge,
1307 };
1308 
1309 /***********************************************************************
1310  * EP0 handling
1311  ***********************************************************************/
1312 
1313 /**
1314  * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1315  * @udc: Reference to the device controller.
1316  * @ctrl: 8-byte SETUP request.
1317  */
1318 static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1319 	struct usb_ctrlrequest *ctrl)
1320 {
1321 	int rc;
1322 
1323 	spin_unlock_irq(&udc->lock);
1324 	rc = udc->driver->setup(&udc->gadget, ctrl);
1325 	spin_lock_irq(&udc->lock);
1326 	return rc;
1327 }
1328 
1329 /**
1330  * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1331  * @udc: Reference to the device controller.
1332  *
1333  * Many standard requests are handled automatically in the hardware, but
1334  * we still need to pass them to the gadget driver so that it can
1335  * reconfigure the interfaces/endpoints if necessary.
1336  *
1337  * Unfortunately we are not able to send a STALL response if the host
1338  * requests an invalid configuration.  If this happens, we'll have to be
1339  * content with printing a warning.
1340  */
1341 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1342 {
1343 	struct usb_ctrlrequest ctrl;
1344 	int rc;
1345 
1346 	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1347 	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1348 	ctrl.wValue = cpu_to_le16(udc->cfg);
1349 	ctrl.wIndex = 0;
1350 	ctrl.wLength = 0;
1351 
1352 	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1353 	if (rc < 0) {
1354 		dev_warn_ratelimited(udc->dev,
1355 			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1356 			udc->cfg);
1357 	}
1358 	return rc;
1359 }
1360 
1361 /**
1362  * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1363  * @udc: Reference to the device controller.
1364  */
1365 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1366 {
1367 	struct usb_ctrlrequest ctrl;
1368 	int rc;
1369 
1370 	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1371 	ctrl.bRequest = USB_REQ_SET_INTERFACE;
1372 	ctrl.wValue = cpu_to_le16(udc->alt_iface);
1373 	ctrl.wIndex = cpu_to_le16(udc->iface);
1374 	ctrl.wLength = 0;
1375 
1376 	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1377 	if (rc < 0) {
1378 		dev_warn_ratelimited(udc->dev,
1379 			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1380 			udc->iface, udc->alt_iface);
1381 	}
1382 	return rc;
1383 }
1384 
1385 /**
1386  * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1387  * @udc: Reference to the device controller.
1388  * @ch_idx: IUDMA channel number.
1389  * @req: USB gadget layer representation of the request.
1390  */
1391 static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1392 	struct usb_request *req)
1393 {
1394 	struct bcm63xx_req *breq = our_req(req);
1395 	struct iudma_ch *iudma = &udc->iudma[ch_idx];
1396 
1397 	BUG_ON(udc->ep0_request);
1398 	udc->ep0_request = req;
1399 
1400 	req->actual = 0;
1401 	breq->offset = 0;
1402 	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1403 	iudma_write(udc, iudma, breq);
1404 }
1405 
1406 /**
1407  * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1408  * @udc: Reference to the device controller.
1409  * @req: USB gadget layer representation of the request.
1410  * @status: Status to return to the gadget driver.
1411  */
1412 static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1413 	struct usb_request *req, int status)
1414 {
1415 	req->status = status;
1416 	if (status)
1417 		req->actual = 0;
1418 	if (req->complete) {
1419 		spin_unlock_irq(&udc->lock);
1420 		req->complete(&udc->bep[0].ep, req);
1421 		spin_lock_irq(&udc->lock);
1422 	}
1423 }
1424 
1425 /**
1426  * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1427  *   reset/shutdown.
1428  * @udc: Reference to the device controller.
1429  * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1430  */
1431 static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1432 {
1433 	struct usb_request *req = udc->ep0_reply;
1434 
1435 	udc->ep0_reply = NULL;
1436 	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1437 	if (udc->ep0_request == req) {
1438 		udc->ep0_req_completed = 0;
1439 		udc->ep0_request = NULL;
1440 	}
1441 	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1442 }
1443 
1444 /**
1445  * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1446  *   transfer len.
1447  * @udc: Reference to the device controller.
1448  */
1449 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1450 {
1451 	struct usb_request *req = udc->ep0_request;
1452 
1453 	udc->ep0_req_completed = 0;
1454 	udc->ep0_request = NULL;
1455 
1456 	return req->actual;
1457 }
1458 
1459 /**
1460  * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1461  * @udc: Reference to the device controller.
1462  * @ch_idx: IUDMA channel number.
1463  * @length: Number of bytes to TX/RX.
1464  *
1465  * Used for simple transfers performed by the ep0 worker.  This will always
1466  * use ep0_ctrl_req / ep0_ctrl_buf.
1467  */
1468 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1469 	int length)
1470 {
1471 	struct usb_request *req = &udc->ep0_ctrl_req.req;
1472 
1473 	req->buf = udc->ep0_ctrl_buf;
1474 	req->length = length;
1475 	req->complete = NULL;
1476 
1477 	bcm63xx_ep0_map_write(udc, ch_idx, req);
1478 }
1479 
1480 /**
1481  * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1482  * @udc: Reference to the device controller.
1483  *
1484  * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
1485  * for the next packet.  Anything else means the transaction requires multiple
1486  * stages of handling.
1487  */
1488 static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1489 {
1490 	int rc;
1491 	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1492 
1493 	rc = bcm63xx_ep0_read_complete(udc);
1494 
1495 	if (rc < 0) {
1496 		dev_err(udc->dev, "missing SETUP packet\n");
1497 		return EP0_IDLE;
1498 	}
1499 
1500 	/*
1501 	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
1502 	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
1503 	 * just throw it away.
1504 	 */
1505 	if (rc == 0)
1506 		return EP0_REQUEUE;
1507 
1508 	/* Drop malformed SETUP packets */
1509 	if (rc != sizeof(*ctrl)) {
1510 		dev_warn_ratelimited(udc->dev,
1511 			"malformed SETUP packet (%d bytes)\n", rc);
1512 		return EP0_REQUEUE;
1513 	}
1514 
1515 	/* Process new SETUP packet arriving on ep0 */
1516 	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1517 	if (rc < 0) {
1518 		bcm63xx_set_stall(udc, &udc->bep[0], true);
1519 		return EP0_REQUEUE;
1520 	}
1521 
1522 	if (!ctrl->wLength)
1523 		return EP0_REQUEUE;
1524 	else if (ctrl->bRequestType & USB_DIR_IN)
1525 		return EP0_IN_DATA_PHASE_SETUP;
1526 	else
1527 		return EP0_OUT_DATA_PHASE_SETUP;
1528 }
1529 
1530 /**
1531  * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1532  * @udc: Reference to the device controller.
1533  *
1534  * In state EP0_IDLE, the RX descriptor is either pending, or has been
1535  * filled with a SETUP packet from the host.  This function handles new
1536  * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1537  * and reset/shutdown events.
1538  *
1539  * Returns 0 if work was done; -EAGAIN if nothing to do.
1540  */
1541 static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1542 {
1543 	if (udc->ep0_req_reset) {
1544 		udc->ep0_req_reset = 0;
1545 	} else if (udc->ep0_req_set_cfg) {
1546 		udc->ep0_req_set_cfg = 0;
1547 		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1548 			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1549 	} else if (udc->ep0_req_set_iface) {
1550 		udc->ep0_req_set_iface = 0;
1551 		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1552 			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1553 	} else if (udc->ep0_req_completed) {
1554 		udc->ep0state = bcm63xx_ep0_do_setup(udc);
1555 		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1556 	} else if (udc->ep0_req_shutdown) {
1557 		udc->ep0_req_shutdown = 0;
1558 		udc->ep0_req_completed = 0;
1559 		udc->ep0_request = NULL;
1560 		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1561 		usb_gadget_unmap_request(&udc->gadget,
1562 			&udc->ep0_ctrl_req.req, 0);
1563 
1564 		/* bcm63xx_udc_pullup() is waiting for this */
1565 		mb();
1566 		udc->ep0state = EP0_SHUTDOWN;
1567 	} else if (udc->ep0_reply) {
1568 		/*
1569 		 * This could happen if a USB RESET shows up during an ep0
1570 		 * transaction (especially if a laggy driver like gadgetfs
1571 		 * is in use).
1572 		 */
1573 		dev_warn(udc->dev, "nuking unexpected reply\n");
1574 		bcm63xx_ep0_nuke_reply(udc, 0);
1575 	} else {
1576 		return -EAGAIN;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 /**
1583  * bcm63xx_ep0_one_round - Handle the current ep0 state.
1584  * @udc: Reference to the device controller.
1585  *
1586  * Returns 0 if work was done; -EAGAIN if nothing to do.
1587  */
1588 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1589 {
1590 	enum bcm63xx_ep0_state ep0state = udc->ep0state;
1591 	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1592 
1593 	switch (udc->ep0state) {
1594 	case EP0_REQUEUE:
1595 		/* set up descriptor to receive SETUP packet */
1596 		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1597 					     BCM63XX_MAX_CTRL_PKT);
1598 		ep0state = EP0_IDLE;
1599 		break;
1600 	case EP0_IDLE:
1601 		return bcm63xx_ep0_do_idle(udc);
1602 	case EP0_IN_DATA_PHASE_SETUP:
1603 		/*
1604 		 * Normal case: TX request is in ep0_reply (queued by the
1605 		 * callback), or will be queued shortly.  When it's here,
1606 		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1607 		 *
1608 		 * Shutdown case: Stop waiting for the reply.  Just
1609 		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
1610 		 * queue anything else now.
1611 		 */
1612 		if (udc->ep0_reply) {
1613 			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1614 					      udc->ep0_reply);
1615 			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1616 		} else if (shutdown) {
1617 			ep0state = EP0_REQUEUE;
1618 		}
1619 		break;
1620 	case EP0_IN_DATA_PHASE_COMPLETE: {
1621 		/*
1622 		 * Normal case: TX packet (ep0_reply) is in flight; wait for
1623 		 * it to finish, then go back to REQUEUE->IDLE.
1624 		 *
1625 		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1626 		 * completion to the gadget driver, then REQUEUE->IDLE.
1627 		 */
1628 		if (udc->ep0_req_completed) {
1629 			udc->ep0_reply = NULL;
1630 			bcm63xx_ep0_read_complete(udc);
1631 			/*
1632 			 * the "ack" sometimes gets eaten (see
1633 			 * bcm63xx_ep0_do_idle)
1634 			 */
1635 			ep0state = EP0_REQUEUE;
1636 		} else if (shutdown) {
1637 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1638 			bcm63xx_ep0_nuke_reply(udc, 1);
1639 			ep0state = EP0_REQUEUE;
1640 		}
1641 		break;
1642 	}
1643 	case EP0_OUT_DATA_PHASE_SETUP:
1644 		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1645 		if (udc->ep0_reply) {
1646 			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1647 					      udc->ep0_reply);
1648 			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1649 		} else if (shutdown) {
1650 			ep0state = EP0_REQUEUE;
1651 		}
1652 		break;
1653 	case EP0_OUT_DATA_PHASE_COMPLETE: {
1654 		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1655 		if (udc->ep0_req_completed) {
1656 			udc->ep0_reply = NULL;
1657 			bcm63xx_ep0_read_complete(udc);
1658 
1659 			/* send 0-byte ack to host */
1660 			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1661 			ep0state = EP0_OUT_STATUS_PHASE;
1662 		} else if (shutdown) {
1663 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1664 			bcm63xx_ep0_nuke_reply(udc, 0);
1665 			ep0state = EP0_REQUEUE;
1666 		}
1667 		break;
1668 	}
1669 	case EP0_OUT_STATUS_PHASE:
1670 		/*
1671 		 * Normal case: 0-byte OUT ack packet is in flight; wait
1672 		 * for it to finish, then go back to REQUEUE->IDLE.
1673 		 *
1674 		 * Shutdown case: just cancel the transmission.  Don't bother
1675 		 * calling the completion, because it originated from this
1676 		 * function anyway.  Then go back to REQUEUE->IDLE.
1677 		 */
1678 		if (udc->ep0_req_completed) {
1679 			bcm63xx_ep0_read_complete(udc);
1680 			ep0state = EP0_REQUEUE;
1681 		} else if (shutdown) {
1682 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1683 			udc->ep0_request = NULL;
1684 			ep0state = EP0_REQUEUE;
1685 		}
1686 		break;
1687 	case EP0_IN_FAKE_STATUS_PHASE: {
1688 		/*
1689 		 * Normal case: we spoofed a SETUP packet and are now
1690 		 * waiting for the gadget driver to send a 0-byte reply.
1691 		 * This doesn't actually get sent to the HW because the
1692 		 * HW has already sent its own reply.  Once we get the
1693 		 * response, return to IDLE.
1694 		 *
1695 		 * Shutdown case: return to IDLE immediately.
1696 		 *
1697 		 * Note that the ep0 RX descriptor has remained queued
1698 		 * (and possibly unfilled) during this entire transaction.
1699 		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1700 		 * or SET_INTERFACE transactions.
1701 		 */
1702 		struct usb_request *r = udc->ep0_reply;
1703 
1704 		if (!r) {
1705 			if (shutdown)
1706 				ep0state = EP0_IDLE;
1707 			break;
1708 		}
1709 
1710 		bcm63xx_ep0_complete(udc, r, 0);
1711 		udc->ep0_reply = NULL;
1712 		ep0state = EP0_IDLE;
1713 		break;
1714 	}
1715 	case EP0_SHUTDOWN:
1716 		break;
1717 	}
1718 
1719 	if (udc->ep0state == ep0state)
1720 		return -EAGAIN;
1721 
1722 	udc->ep0state = ep0state;
1723 	return 0;
1724 }
1725 
1726 /**
1727  * bcm63xx_ep0_process - ep0 worker thread / state machine.
1728  * @w: Workqueue struct.
1729  *
1730  * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
1731  * is used to synchronize ep0 events and ensure that both HW and SW events
1732  * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
1733  * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1734  * by the USBD hardware.
1735  *
1736  * The worker function will continue iterating around the state machine
1737  * until there is nothing left to do.  Usually "nothing left to do" means
1738  * that we're waiting for a new event from the hardware.
1739  */
1740 static void bcm63xx_ep0_process(struct work_struct *w)
1741 {
1742 	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1743 	spin_lock_irq(&udc->lock);
1744 	while (bcm63xx_ep0_one_round(udc) == 0)
1745 		;
1746 	spin_unlock_irq(&udc->lock);
1747 }
1748 
1749 /***********************************************************************
1750  * Standard UDC gadget operations
1751  ***********************************************************************/
1752 
1753 /**
1754  * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1755  * @gadget: USB slave device.
1756  */
1757 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1758 {
1759 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1760 
1761 	return (usbd_readl(udc, USBD_STATUS_REG) &
1762 		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1763 }
1764 
1765 /**
1766  * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1767  * @gadget: USB slave device.
1768  * @is_on: 0 to disable pullup, 1 to enable.
1769  *
1770  * See notes in bcm63xx_select_pullup().
1771  */
1772 static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1773 {
1774 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1775 	unsigned long flags;
1776 	int i, rc = -EINVAL;
1777 
1778 	spin_lock_irqsave(&udc->lock, flags);
1779 	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1780 		udc->gadget.speed = USB_SPEED_UNKNOWN;
1781 		udc->ep0state = EP0_REQUEUE;
1782 		bcm63xx_fifo_setup(udc);
1783 		bcm63xx_fifo_reset(udc);
1784 		bcm63xx_ep_setup(udc);
1785 
1786 		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1787 		for (i = 0; i < BCM63XX_NUM_EP; i++)
1788 			bcm63xx_set_stall(udc, &udc->bep[i], false);
1789 
1790 		bcm63xx_set_ctrl_irqs(udc, true);
1791 		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1792 		rc = 0;
1793 	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1794 		bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1795 
1796 		udc->ep0_req_shutdown = 1;
1797 		spin_unlock_irqrestore(&udc->lock, flags);
1798 
1799 		while (1) {
1800 			schedule_work(&udc->ep0_wq);
1801 			if (udc->ep0state == EP0_SHUTDOWN)
1802 				break;
1803 			msleep(50);
1804 		}
1805 		bcm63xx_set_ctrl_irqs(udc, false);
1806 		cancel_work_sync(&udc->ep0_wq);
1807 		return 0;
1808 	}
1809 
1810 	spin_unlock_irqrestore(&udc->lock, flags);
1811 	return rc;
1812 }
1813 
1814 /**
1815  * bcm63xx_udc_start - Start the controller.
1816  * @gadget: USB slave device.
1817  * @driver: Driver for USB slave devices.
1818  */
1819 static int bcm63xx_udc_start(struct usb_gadget *gadget,
1820 		struct usb_gadget_driver *driver)
1821 {
1822 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1823 	unsigned long flags;
1824 
1825 	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1826 	    !driver->setup)
1827 		return -EINVAL;
1828 	if (!udc)
1829 		return -ENODEV;
1830 	if (udc->driver)
1831 		return -EBUSY;
1832 
1833 	spin_lock_irqsave(&udc->lock, flags);
1834 
1835 	set_clocks(udc, true);
1836 	bcm63xx_fifo_setup(udc);
1837 	bcm63xx_ep_init(udc);
1838 	bcm63xx_ep_setup(udc);
1839 	bcm63xx_fifo_reset(udc);
1840 	bcm63xx_select_phy_mode(udc, true);
1841 
1842 	udc->driver = driver;
1843 	driver->driver.bus = NULL;
1844 	udc->gadget.dev.of_node = udc->dev->of_node;
1845 
1846 	spin_unlock_irqrestore(&udc->lock, flags);
1847 
1848 	return 0;
1849 }
1850 
1851 /**
1852  * bcm63xx_udc_stop - Shut down the controller.
1853  * @gadget: USB slave device.
1854  * @driver: Driver for USB slave devices.
1855  */
1856 static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1857 {
1858 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1859 	unsigned long flags;
1860 
1861 	spin_lock_irqsave(&udc->lock, flags);
1862 
1863 	udc->driver = NULL;
1864 
1865 	/*
1866 	 * If we switch the PHY too abruptly after dropping D+, the host
1867 	 * will often complain:
1868 	 *
1869 	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1870 	 */
1871 	msleep(100);
1872 
1873 	bcm63xx_select_phy_mode(udc, false);
1874 	set_clocks(udc, false);
1875 
1876 	spin_unlock_irqrestore(&udc->lock, flags);
1877 
1878 	return 0;
1879 }
1880 
1881 static const struct usb_gadget_ops bcm63xx_udc_ops = {
1882 	.get_frame	= bcm63xx_udc_get_frame,
1883 	.pullup		= bcm63xx_udc_pullup,
1884 	.udc_start	= bcm63xx_udc_start,
1885 	.udc_stop	= bcm63xx_udc_stop,
1886 };
1887 
1888 /***********************************************************************
1889  * IRQ handling
1890  ***********************************************************************/
1891 
1892 /**
1893  * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1894  * @udc: Reference to the device controller.
1895  *
1896  * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1897  * The driver never sees the raw control packets coming in on the ep0
1898  * IUDMA channel, but at least we get an interrupt event to tell us that
1899  * new values are waiting in the USBD_STATUS register.
1900  */
1901 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1902 {
1903 	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1904 
1905 	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1906 	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1907 	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1908 			 USBD_STATUS_ALTINTF_SHIFT;
1909 	bcm63xx_ep_setup(udc);
1910 }
1911 
1912 /**
1913  * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1914  * @udc: Reference to the device controller.
1915  *
1916  * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
1917  * speed has changed, so that the caller can update the endpoint settings.
1918  */
1919 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1920 {
1921 	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1922 	enum usb_device_speed oldspeed = udc->gadget.speed;
1923 
1924 	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1925 	case BCM63XX_SPD_HIGH:
1926 		udc->gadget.speed = USB_SPEED_HIGH;
1927 		break;
1928 	case BCM63XX_SPD_FULL:
1929 		udc->gadget.speed = USB_SPEED_FULL;
1930 		break;
1931 	default:
1932 		/* this should never happen */
1933 		udc->gadget.speed = USB_SPEED_UNKNOWN;
1934 		dev_err(udc->dev,
1935 			"received SETUP packet with invalid link speed\n");
1936 		return 0;
1937 	}
1938 
1939 	if (udc->gadget.speed != oldspeed) {
1940 		dev_info(udc->dev, "link up, %s-speed mode\n",
1941 			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1942 		return 1;
1943 	} else {
1944 		return 0;
1945 	}
1946 }
1947 
1948 /**
1949  * bcm63xx_update_wedge - Iterate through wedged endpoints.
1950  * @udc: Reference to the device controller.
1951  * @new_status: true to "refresh" wedge status; false to clear it.
1952  *
1953  * On a SETUP interrupt, we need to manually "refresh" the wedge status
1954  * because the controller hardware is designed to automatically clear
1955  * stalls in response to a CLEAR_FEATURE request from the host.
1956  *
1957  * On a RESET interrupt, we do want to restore all wedged endpoints.
1958  */
1959 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1960 {
1961 	int i;
1962 
1963 	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1964 		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1965 		if (!new_status)
1966 			clear_bit(i, &udc->wedgemap);
1967 	}
1968 }
1969 
1970 /**
1971  * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1972  * @irq: IRQ number (unused).
1973  * @dev_id: Reference to the device controller.
1974  *
1975  * This is where we handle link (VBUS) down, USB reset, speed changes,
1976  * SET_CONFIGURATION, and SET_INTERFACE events.
1977  */
1978 static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1979 {
1980 	struct bcm63xx_udc *udc = dev_id;
1981 	u32 stat;
1982 	bool disconnected = false, bus_reset = false;
1983 
1984 	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1985 	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1986 
1987 	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1988 
1989 	spin_lock(&udc->lock);
1990 	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1991 		/* VBUS toggled */
1992 
1993 		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1994 		      USBD_EVENTS_USB_LINK_MASK) &&
1995 		      udc->gadget.speed != USB_SPEED_UNKNOWN)
1996 			dev_info(udc->dev, "link down\n");
1997 
1998 		udc->gadget.speed = USB_SPEED_UNKNOWN;
1999 		disconnected = true;
2000 	}
2001 	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
2002 		bcm63xx_fifo_setup(udc);
2003 		bcm63xx_fifo_reset(udc);
2004 		bcm63xx_ep_setup(udc);
2005 
2006 		bcm63xx_update_wedge(udc, false);
2007 
2008 		udc->ep0_req_reset = 1;
2009 		schedule_work(&udc->ep0_wq);
2010 		bus_reset = true;
2011 	}
2012 	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2013 		if (bcm63xx_update_link_speed(udc)) {
2014 			bcm63xx_fifo_setup(udc);
2015 			bcm63xx_ep_setup(udc);
2016 		}
2017 		bcm63xx_update_wedge(udc, true);
2018 	}
2019 	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2020 		bcm63xx_update_cfg_iface(udc);
2021 		udc->ep0_req_set_cfg = 1;
2022 		schedule_work(&udc->ep0_wq);
2023 	}
2024 	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2025 		bcm63xx_update_cfg_iface(udc);
2026 		udc->ep0_req_set_iface = 1;
2027 		schedule_work(&udc->ep0_wq);
2028 	}
2029 	spin_unlock(&udc->lock);
2030 
2031 	if (disconnected && udc->driver)
2032 		udc->driver->disconnect(&udc->gadget);
2033 	else if (bus_reset && udc->driver)
2034 		usb_gadget_udc_reset(&udc->gadget, udc->driver);
2035 
2036 	return IRQ_HANDLED;
2037 }
2038 
2039 /**
2040  * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2041  * @irq: IRQ number (unused).
2042  * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2043  *
2044  * For the two ep0 channels, we have special handling that triggers the
2045  * ep0 worker thread.  For normal bulk/intr channels, either queue up
2046  * the next buffer descriptor for the transaction (incomplete transaction),
2047  * or invoke the completion callback (complete transactions).
2048  */
2049 static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2050 {
2051 	struct iudma_ch *iudma = dev_id;
2052 	struct bcm63xx_udc *udc = iudma->udc;
2053 	struct bcm63xx_ep *bep;
2054 	struct usb_request *req = NULL;
2055 	struct bcm63xx_req *breq = NULL;
2056 	int rc;
2057 	bool is_done = false;
2058 
2059 	spin_lock(&udc->lock);
2060 
2061 	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2062 			ENETDMAC_IR_REG, iudma->ch_idx);
2063 	bep = iudma->bep;
2064 	rc = iudma_read(udc, iudma);
2065 
2066 	/* special handling for EP0 RX (0) and TX (1) */
2067 	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2068 	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2069 		req = udc->ep0_request;
2070 		breq = our_req(req);
2071 
2072 		/* a single request could require multiple submissions */
2073 		if (rc >= 0) {
2074 			req->actual += rc;
2075 
2076 			if (req->actual >= req->length || breq->bd_bytes > rc) {
2077 				udc->ep0_req_completed = 1;
2078 				is_done = true;
2079 				schedule_work(&udc->ep0_wq);
2080 
2081 				/* "actual" on a ZLP is 1 byte */
2082 				req->actual = min(req->actual, req->length);
2083 			} else {
2084 				/* queue up the next BD (same request) */
2085 				iudma_write(udc, iudma, breq);
2086 			}
2087 		}
2088 	} else if (!list_empty(&bep->queue)) {
2089 		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2090 		req = &breq->req;
2091 
2092 		if (rc >= 0) {
2093 			req->actual += rc;
2094 
2095 			if (req->actual >= req->length || breq->bd_bytes > rc) {
2096 				is_done = true;
2097 				list_del(&breq->queue);
2098 
2099 				req->actual = min(req->actual, req->length);
2100 
2101 				if (!list_empty(&bep->queue)) {
2102 					struct bcm63xx_req *next;
2103 
2104 					next = list_first_entry(&bep->queue,
2105 						struct bcm63xx_req, queue);
2106 					iudma_write(udc, iudma, next);
2107 				}
2108 			} else {
2109 				iudma_write(udc, iudma, breq);
2110 			}
2111 		}
2112 	}
2113 	spin_unlock(&udc->lock);
2114 
2115 	if (is_done) {
2116 		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2117 		if (req->complete)
2118 			req->complete(&bep->ep, req);
2119 	}
2120 
2121 	return IRQ_HANDLED;
2122 }
2123 
2124 /***********************************************************************
2125  * Debug filesystem
2126  ***********************************************************************/
2127 
2128 /*
2129  * bcm63xx_usbd_dbg_show - Show USBD controller state.
2130  * @s: seq_file to which the information will be written.
2131  * @p: Unused.
2132  *
2133  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2134  */
2135 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2136 {
2137 	struct bcm63xx_udc *udc = s->private;
2138 
2139 	if (!udc->driver)
2140 		return -ENODEV;
2141 
2142 	seq_printf(s, "ep0 state: %s\n",
2143 		   bcm63xx_ep0_state_names[udc->ep0state]);
2144 	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
2145 		   udc->ep0_req_reset ? "reset " : "",
2146 		   udc->ep0_req_set_cfg ? "set_cfg " : "",
2147 		   udc->ep0_req_set_iface ? "set_iface " : "",
2148 		   udc->ep0_req_shutdown ? "shutdown " : "",
2149 		   udc->ep0_request ? "pending " : "",
2150 		   udc->ep0_req_completed ? "completed " : "",
2151 		   udc->ep0_reply ? "reply " : "");
2152 	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2153 		   udc->cfg, udc->iface, udc->alt_iface);
2154 	seq_printf(s, "regs:\n");
2155 	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
2156 		   usbd_readl(udc, USBD_CONTROL_REG),
2157 		   usbd_readl(udc, USBD_STRAPS_REG),
2158 		   usbd_readl(udc, USBD_STATUS_REG));
2159 	seq_printf(s, "  events:  %08x; stall:  %08x\n",
2160 		   usbd_readl(udc, USBD_EVENTS_REG),
2161 		   usbd_readl(udc, USBD_STALL_REG));
2162 
2163 	return 0;
2164 }
2165 
2166 /*
2167  * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2168  * @s: seq_file to which the information will be written.
2169  * @p: Unused.
2170  *
2171  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2172  */
2173 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2174 {
2175 	struct bcm63xx_udc *udc = s->private;
2176 	int ch_idx, i;
2177 	u32 sram2, sram3;
2178 
2179 	if (!udc->driver)
2180 		return -ENODEV;
2181 
2182 	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2183 		struct iudma_ch *iudma = &udc->iudma[ch_idx];
2184 		struct list_head *pos;
2185 
2186 		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2187 		switch (iudma_defaults[ch_idx].ep_type) {
2188 		case BCMEP_CTRL:
2189 			seq_printf(s, "control");
2190 			break;
2191 		case BCMEP_BULK:
2192 			seq_printf(s, "bulk");
2193 			break;
2194 		case BCMEP_INTR:
2195 			seq_printf(s, "interrupt");
2196 			break;
2197 		}
2198 		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2199 		seq_printf(s, " [ep%d]:\n",
2200 			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2201 		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2202 			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2203 			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2204 			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2205 			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2206 
2207 		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2208 		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2209 		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2210 			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2211 			   sram2 >> 16, sram2 & 0xffff,
2212 			   sram3 >> 16, sram3 & 0xffff,
2213 			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2214 		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
2215 			   iudma->n_bds);
2216 
2217 		if (iudma->bep) {
2218 			i = 0;
2219 			list_for_each(pos, &iudma->bep->queue)
2220 				i++;
2221 			seq_printf(s, "; %d queued\n", i);
2222 		} else {
2223 			seq_printf(s, "\n");
2224 		}
2225 
2226 		for (i = 0; i < iudma->n_bds; i++) {
2227 			struct bcm_enet_desc *d = &iudma->bd_ring[i];
2228 
2229 			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
2230 				   i * sizeof(*d), i,
2231 				   d->len_stat >> 16, d->len_stat & 0xffff,
2232 				   d->address);
2233 			if (d == iudma->read_bd)
2234 				seq_printf(s, "   <<RD");
2235 			if (d == iudma->write_bd)
2236 				seq_printf(s, "   <<WR");
2237 			seq_printf(s, "\n");
2238 		}
2239 
2240 		seq_printf(s, "\n");
2241 	}
2242 
2243 	return 0;
2244 }
2245 
2246 static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2247 {
2248 	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2249 }
2250 
2251 static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2252 {
2253 	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2254 }
2255 
2256 static const struct file_operations usbd_dbg_fops = {
2257 	.owner		= THIS_MODULE,
2258 	.open		= bcm63xx_usbd_dbg_open,
2259 	.llseek		= seq_lseek,
2260 	.read		= seq_read,
2261 	.release	= single_release,
2262 };
2263 
2264 static const struct file_operations iudma_dbg_fops = {
2265 	.owner		= THIS_MODULE,
2266 	.open		= bcm63xx_iudma_dbg_open,
2267 	.llseek		= seq_lseek,
2268 	.read		= seq_read,
2269 	.release	= single_release,
2270 };
2271 
2272 
2273 /**
2274  * bcm63xx_udc_init_debugfs - Create debugfs entries.
2275  * @udc: Reference to the device controller.
2276  */
2277 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2278 {
2279 	struct dentry *root, *usbd, *iudma;
2280 
2281 	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2282 		return;
2283 
2284 	root = debugfs_create_dir(udc->gadget.name, NULL);
2285 	if (IS_ERR(root) || !root)
2286 		goto err_root;
2287 
2288 	usbd = debugfs_create_file("usbd", 0400, root, udc,
2289 			&usbd_dbg_fops);
2290 	if (!usbd)
2291 		goto err_usbd;
2292 	iudma = debugfs_create_file("iudma", 0400, root, udc,
2293 			&iudma_dbg_fops);
2294 	if (!iudma)
2295 		goto err_iudma;
2296 
2297 	udc->debugfs_root = root;
2298 	udc->debugfs_usbd = usbd;
2299 	udc->debugfs_iudma = iudma;
2300 	return;
2301 err_iudma:
2302 	debugfs_remove(usbd);
2303 err_usbd:
2304 	debugfs_remove(root);
2305 err_root:
2306 	dev_err(udc->dev, "debugfs is not available\n");
2307 }
2308 
2309 /**
2310  * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2311  * @udc: Reference to the device controller.
2312  *
2313  * debugfs_remove() is safe to call with a NULL argument.
2314  */
2315 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2316 {
2317 	debugfs_remove(udc->debugfs_iudma);
2318 	debugfs_remove(udc->debugfs_usbd);
2319 	debugfs_remove(udc->debugfs_root);
2320 	udc->debugfs_iudma = NULL;
2321 	udc->debugfs_usbd = NULL;
2322 	udc->debugfs_root = NULL;
2323 }
2324 
2325 /***********************************************************************
2326  * Driver init/exit
2327  ***********************************************************************/
2328 
2329 /**
2330  * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2331  * @pdev: Platform device struct from the bcm63xx BSP code.
2332  *
2333  * Note that platform data is required, because pd.port_no varies from chip
2334  * to chip and is used to switch the correct USB port to device mode.
2335  */
2336 static int bcm63xx_udc_probe(struct platform_device *pdev)
2337 {
2338 	struct device *dev = &pdev->dev;
2339 	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2340 	struct bcm63xx_udc *udc;
2341 	struct resource *res;
2342 	int rc = -ENOMEM, i, irq;
2343 
2344 	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2345 	if (!udc)
2346 		return -ENOMEM;
2347 
2348 	platform_set_drvdata(pdev, udc);
2349 	udc->dev = dev;
2350 	udc->pd = pd;
2351 
2352 	if (!pd) {
2353 		dev_err(dev, "missing platform data\n");
2354 		return -EINVAL;
2355 	}
2356 
2357 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2358 	udc->usbd_regs = devm_ioremap_resource(dev, res);
2359 	if (IS_ERR(udc->usbd_regs))
2360 		return PTR_ERR(udc->usbd_regs);
2361 
2362 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2363 	udc->iudma_regs = devm_ioremap_resource(dev, res);
2364 	if (IS_ERR(udc->iudma_regs))
2365 		return PTR_ERR(udc->iudma_regs);
2366 
2367 	spin_lock_init(&udc->lock);
2368 	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2369 
2370 	udc->gadget.ops = &bcm63xx_udc_ops;
2371 	udc->gadget.name = dev_name(dev);
2372 
2373 	if (!pd->use_fullspeed && !use_fullspeed)
2374 		udc->gadget.max_speed = USB_SPEED_HIGH;
2375 	else
2376 		udc->gadget.max_speed = USB_SPEED_FULL;
2377 
2378 	/* request clocks, allocate buffers, and clear any pending IRQs */
2379 	rc = bcm63xx_init_udc_hw(udc);
2380 	if (rc)
2381 		return rc;
2382 
2383 	rc = -ENXIO;
2384 
2385 	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2386 	irq = platform_get_irq(pdev, 0);
2387 	if (irq < 0) {
2388 		dev_err(dev, "missing IRQ resource #0\n");
2389 		goto out_uninit;
2390 	}
2391 	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2392 			     dev_name(dev), udc) < 0) {
2393 		dev_err(dev, "error requesting IRQ #%d\n", irq);
2394 		goto out_uninit;
2395 	}
2396 
2397 	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2398 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2399 		irq = platform_get_irq(pdev, i + 1);
2400 		if (irq < 0) {
2401 			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2402 			goto out_uninit;
2403 		}
2404 		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2405 				     dev_name(dev), &udc->iudma[i]) < 0) {
2406 			dev_err(dev, "error requesting IRQ #%d\n", irq);
2407 			goto out_uninit;
2408 		}
2409 	}
2410 
2411 	bcm63xx_udc_init_debugfs(udc);
2412 	rc = usb_add_gadget_udc(dev, &udc->gadget);
2413 	if (!rc)
2414 		return 0;
2415 
2416 	bcm63xx_udc_cleanup_debugfs(udc);
2417 out_uninit:
2418 	bcm63xx_uninit_udc_hw(udc);
2419 	return rc;
2420 }
2421 
2422 /**
2423  * bcm63xx_udc_remove - Remove the device from the system.
2424  * @pdev: Platform device struct from the bcm63xx BSP code.
2425  */
2426 static int bcm63xx_udc_remove(struct platform_device *pdev)
2427 {
2428 	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2429 
2430 	bcm63xx_udc_cleanup_debugfs(udc);
2431 	usb_del_gadget_udc(&udc->gadget);
2432 	BUG_ON(udc->driver);
2433 
2434 	bcm63xx_uninit_udc_hw(udc);
2435 
2436 	return 0;
2437 }
2438 
2439 static struct platform_driver bcm63xx_udc_driver = {
2440 	.probe		= bcm63xx_udc_probe,
2441 	.remove		= bcm63xx_udc_remove,
2442 	.driver		= {
2443 		.name	= DRV_MODULE_NAME,
2444 	},
2445 };
2446 module_platform_driver(bcm63xx_udc_driver);
2447 
2448 MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2449 MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2450 MODULE_LICENSE("GPL");
2451 MODULE_ALIAS("platform:" DRV_MODULE_NAME);
2452