1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Renesas USBF USB Function driver
4  *
5  * Copyright 2022 Schneider Electric
6  * Author: Herve Codina <herve.codina@bootlin.com>
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/iopoll.h>
13 #include <linux/kernel.h>
14 #include <linux/kfifo.h>
15 #include <linux/module.h>
16 #include <linux/of_address.h>
17 #include <linux/of_irq.h>
18 #include <linux/of_platform.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/types.h>
21 #include <linux/usb/composite.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/role.h>
24 
25 #define USBF_NUM_ENDPOINTS	16
26 #define USBF_EP0_MAX_PCKT_SIZE	64
27 
28 /* EPC registers */
29 #define USBF_REG_USB_CONTROL	0x000
30 #define     USBF_USB_PUE2		BIT(2)
31 #define     USBF_USB_CONNECTB		BIT(3)
32 #define     USBF_USB_DEFAULT		BIT(4)
33 #define     USBF_USB_CONF		BIT(5)
34 #define     USBF_USB_SUSPEND		BIT(6)
35 #define     USBF_USB_RSUM_IN		BIT(7)
36 #define     USBF_USB_SOF_RCV		BIT(8)
37 #define     USBF_USB_FORCEFS		BIT(9)
38 #define     USBF_USB_INT_SEL		BIT(10)
39 #define     USBF_USB_SOF_CLK_MODE	BIT(11)
40 
41 #define USBF_REG_USB_STATUS	0x004
42 #define     USBF_USB_RSUM_OUT		BIT(1)
43 #define     USBF_USB_SPND_OUT		BIT(2)
44 #define     USBF_USB_USB_RST		BIT(3)
45 #define     USBF_USB_DEFAULT_ST		BIT(4)
46 #define     USBF_USB_CONF_ST		BIT(5)
47 #define     USBF_USB_SPEED_MODE		BIT(6)
48 #define     USBF_USB_SOF_DELAY_STATUS	BIT(31)
49 
50 #define USBF_REG_USB_ADDRESS	0x008
51 #define     USBF_USB_SOF_STATUS		BIT(15)
52 #define     USBF_USB_SET_USB_ADDR(_a)	((_a) << 16)
53 #define     USBF_USB_GET_FRAME(_r)	((_r) & 0x7FF)
54 
55 #define USBF_REG_SETUP_DATA0	0x018
56 #define USBF_REG_SETUP_DATA1	0x01C
57 #define USBF_REG_USB_INT_STA	0x020
58 #define     USBF_USB_RSUM_INT		BIT(1)
59 #define     USBF_USB_SPND_INT		BIT(2)
60 #define     USBF_USB_USB_RST_INT	BIT(3)
61 #define     USBF_USB_SOF_INT		BIT(4)
62 #define     USBF_USB_SOF_ERROR_INT	BIT(5)
63 #define     USBF_USB_SPEED_MODE_INT	BIT(6)
64 #define     USBF_USB_EPN_INT(_n)	(BIT(8) << (_n)) /* n=0..15 */
65 
66 #define USBF_REG_USB_INT_ENA	0x024
67 #define     USBF_USB_RSUM_EN		BIT(1)
68 #define     USBF_USB_SPND_EN		BIT(2)
69 #define     USBF_USB_USB_RST_EN		BIT(3)
70 #define     USBF_USB_SOF_EN		BIT(4)
71 #define     USBF_USB_SOF_ERROR_EN	BIT(5)
72 #define     USBF_USB_SPEED_MODE_EN	BIT(6)
73 #define     USBF_USB_EPN_EN(_n)		(BIT(8) << (_n)) /* n=0..15 */
74 
75 #define USBF_BASE_EP0		0x028
76 /* EP0 registers offsets from Base + USBF_BASE_EP0 (EP0 regs area) */
77 #define     USBF_REG_EP0_CONTROL	0x00
78 #define         USBF_EP0_ONAK			BIT(0)
79 #define         USBF_EP0_INAK			BIT(1)
80 #define         USBF_EP0_STL			BIT(2)
81 #define         USBF_EP0_PERR_NAK_CLR		BIT(3)
82 #define         USBF_EP0_INAK_EN		BIT(4)
83 #define         USBF_EP0_DW_MASK		(0x3 << 5)
84 #define         USBF_EP0_DW(_s)			((_s) << 5)
85 #define         USBF_EP0_DEND			BIT(7)
86 #define         USBF_EP0_BCLR			BIT(8)
87 #define         USBF_EP0_PIDCLR			BIT(9)
88 #define         USBF_EP0_AUTO			BIT(16)
89 #define         USBF_EP0_OVERSEL		BIT(17)
90 #define         USBF_EP0_STGSEL			BIT(18)
91 
92 #define     USBF_REG_EP0_STATUS		0x04
93 #define         USBF_EP0_SETUP_INT		BIT(0)
94 #define         USBF_EP0_STG_START_INT		BIT(1)
95 #define         USBF_EP0_STG_END_INT		BIT(2)
96 #define         USBF_EP0_STALL_INT		BIT(3)
97 #define         USBF_EP0_IN_INT			BIT(4)
98 #define         USBF_EP0_OUT_INT		BIT(5)
99 #define         USBF_EP0_OUT_OR_INT		BIT(6)
100 #define         USBF_EP0_OUT_NULL_INT		BIT(7)
101 #define         USBF_EP0_IN_EMPTY		BIT(8)
102 #define         USBF_EP0_IN_FULL		BIT(9)
103 #define         USBF_EP0_IN_DATA		BIT(10)
104 #define         USBF_EP0_IN_NAK_INT		BIT(11)
105 #define         USBF_EP0_OUT_EMPTY		BIT(12)
106 #define         USBF_EP0_OUT_FULL		BIT(13)
107 #define         USBF_EP0_OUT_NULL		BIT(14)
108 #define         USBF_EP0_OUT_NAK_INT		BIT(15)
109 #define         USBF_EP0_PERR_NAK_INT		BIT(16)
110 #define         USBF_EP0_PERR_NAK		BIT(17)
111 #define         USBF_EP0_PID			BIT(18)
112 
113 #define     USBF_REG_EP0_INT_ENA	0x08
114 #define         USBF_EP0_SETUP_EN		BIT(0)
115 #define         USBF_EP0_STG_START_EN		BIT(1)
116 #define         USBF_EP0_STG_END_EN		BIT(2)
117 #define         USBF_EP0_STALL_EN		BIT(3)
118 #define         USBF_EP0_IN_EN			BIT(4)
119 #define         USBF_EP0_OUT_EN			BIT(5)
120 #define         USBF_EP0_OUT_OR_EN		BIT(6)
121 #define         USBF_EP0_OUT_NULL_EN		BIT(7)
122 #define         USBF_EP0_IN_NAK_EN		BIT(11)
123 #define         USBF_EP0_OUT_NAK_EN		BIT(15)
124 #define         USBF_EP0_PERR_NAK_EN		BIT(16)
125 
126 #define     USBF_REG_EP0_LENGTH		0x0C
127 #define         USBF_EP0_LDATA			(0x7FF << 0)
128 #define     USBF_REG_EP0_READ		0x10
129 #define     USBF_REG_EP0_WRITE		0x14
130 
131 #define USBF_BASE_EPN(_n)	(0x040 + (_n) * 0x020)
132 /* EPn registers offsets from Base + USBF_BASE_EPN(n-1). n=1..15 */
133 #define     USBF_REG_EPN_CONTROL	0x000
134 #define         USBF_EPN_ONAK			BIT(0)
135 #define         USBF_EPN_OSTL			BIT(2)
136 #define         USBF_EPN_ISTL			BIT(3)
137 #define         USBF_EPN_OSTL_EN		BIT(4)
138 #define         USBF_EPN_DW_MASK		(0x3 << 5)
139 #define         USBF_EPN_DW(_s)			((_s) << 5)
140 #define         USBF_EPN_DEND			BIT(7)
141 #define         USBF_EPN_CBCLR			BIT(8)
142 #define         USBF_EPN_BCLR			BIT(9)
143 #define         USBF_EPN_OPIDCLR		BIT(10)
144 #define         USBF_EPN_IPIDCLR		BIT(11)
145 #define         USBF_EPN_AUTO			BIT(16)
146 #define         USBF_EPN_OVERSEL		BIT(17)
147 #define         USBF_EPN_MODE_MASK		(0x3 << 24)
148 #define         USBF_EPN_MODE_BULK		(0x0 << 24)
149 #define         USBF_EPN_MODE_INTR		(0x1 << 24)
150 #define         USBF_EPN_MODE_ISO		(0x2 << 24)
151 #define         USBF_EPN_DIR0			BIT(26)
152 #define         USBF_EPN_BUF_TYPE_DOUBLE	BIT(30)
153 #define         USBF_EPN_EN			BIT(31)
154 
155 #define     USBF_REG_EPN_STATUS		0x004
156 #define         USBF_EPN_IN_EMPTY		BIT(0)
157 #define         USBF_EPN_IN_FULL		BIT(1)
158 #define         USBF_EPN_IN_DATA		BIT(2)
159 #define         USBF_EPN_IN_INT			BIT(3)
160 #define         USBF_EPN_IN_STALL_INT		BIT(4)
161 #define         USBF_EPN_IN_NAK_ERR_INT		BIT(5)
162 #define         USBF_EPN_IN_END_INT		BIT(7)
163 #define         USBF_EPN_IPID			BIT(10)
164 #define         USBF_EPN_OUT_EMPTY		BIT(16)
165 #define         USBF_EPN_OUT_FULL		BIT(17)
166 #define         USBF_EPN_OUT_NULL_INT		BIT(18)
167 #define         USBF_EPN_OUT_INT		BIT(19)
168 #define         USBF_EPN_OUT_STALL_INT		BIT(20)
169 #define         USBF_EPN_OUT_NAK_ERR_INT	BIT(21)
170 #define         USBF_EPN_OUT_OR_INT		BIT(22)
171 #define         USBF_EPN_OUT_END_INT		BIT(23)
172 #define         USBF_EPN_ISO_CRC		BIT(24)
173 #define         USBF_EPN_ISO_OR			BIT(26)
174 #define         USBF_EPN_OUT_NOTKN		BIT(27)
175 #define         USBF_EPN_ISO_OPID		BIT(28)
176 #define         USBF_EPN_ISO_PIDERR		BIT(29)
177 
178 #define     USBF_REG_EPN_INT_ENA	0x008
179 #define         USBF_EPN_IN_EN			BIT(3)
180 #define         USBF_EPN_IN_STALL_EN		BIT(4)
181 #define         USBF_EPN_IN_NAK_ERR_EN		BIT(5)
182 #define         USBF_EPN_IN_END_EN		BIT(7)
183 #define         USBF_EPN_OUT_NULL_EN		BIT(18)
184 #define         USBF_EPN_OUT_EN			BIT(19)
185 #define         USBF_EPN_OUT_STALL_EN		BIT(20)
186 #define         USBF_EPN_OUT_NAK_ERR_EN		BIT(21)
187 #define         USBF_EPN_OUT_OR_EN		BIT(22)
188 #define         USBF_EPN_OUT_END_EN		BIT(23)
189 
190 #define     USBF_REG_EPN_DMA_CTRL	0x00C
191 #define         USBF_EPN_DMAMODE0		BIT(0)
192 #define         USBF_EPN_DMA_EN			BIT(4)
193 #define         USBF_EPN_STOP_SET		BIT(8)
194 #define         USBF_EPN_BURST_SET		BIT(9)
195 #define         USBF_EPN_DEND_SET		BIT(10)
196 #define         USBF_EPN_STOP_MODE		BIT(11)
197 
198 #define     USBF_REG_EPN_PCKT_ADRS	0x010
199 #define         USBF_EPN_MPKT(_l)		((_l) << 0)
200 #define         USBF_EPN_BASEAD(_a)		((_a) << 16)
201 
202 #define     USBF_REG_EPN_LEN_DCNT	0x014
203 #define         USBF_EPN_GET_LDATA(_r)		((_r) & 0x7FF)
204 #define         USBF_EPN_SET_DMACNT(_c)		((_c) << 16)
205 #define         USBF_EPN_GET_DMACNT(_r)		(((_r) >> 16) & 0x1ff)
206 
207 #define     USBF_REG_EPN_READ		0x018
208 #define     USBF_REG_EPN_WRITE		0x01C
209 
210 /* AHB-EPC Bridge registers */
211 #define USBF_REG_AHBSCTR	0x1000
212 #define USBF_REG_AHBMCTR	0x1004
213 #define     USBF_SYS_WBURST_TYPE	BIT(2)
214 #define     USBF_SYS_ARBITER_CTR	BIT(31)
215 
216 #define USBF_REG_AHBBINT	0x1008
217 #define     USBF_SYS_ERR_MASTER		 (0x0F << 0)
218 #define     USBF_SYS_SBUS_ERRINT0	 BIT(4)
219 #define     USBF_SYS_SBUS_ERRINT1	 BIT(5)
220 #define     USBF_SYS_MBUS_ERRINT	 BIT(6)
221 #define     USBF_SYS_VBUS_INT		 BIT(13)
222 #define     USBF_SYS_DMA_ENDINT_EPN(_n)	 (BIT(16) << (_n)) /* _n=1..15 */
223 
224 #define USBF_REG_AHBBINTEN	0x100C
225 #define     USBF_SYS_SBUS_ERRINT0EN	  BIT(4)
226 #define     USBF_SYS_SBUS_ERRINT1EN	  BIT(5)
227 #define     USBF_SYS_MBUS_ERRINTEN	  BIT(6)
228 #define     USBF_SYS_VBUS_INTEN		  BIT(13)
229 #define     USBF_SYS_DMA_ENDINTEN_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
230 
231 #define USBF_REG_EPCTR		0x1010
232 #define     USBF_SYS_EPC_RST		BIT(0)
233 #define     USBF_SYS_PLL_RST		BIT(2)
234 #define     USBF_SYS_PLL_LOCK		BIT(4)
235 #define     USBF_SYS_PLL_RESUME		BIT(5)
236 #define     USBF_SYS_VBUS_LEVEL		BIT(8)
237 #define     USBF_SYS_DIRPD		BIT(12)
238 
239 #define USBF_REG_USBSSVER	0x1020
240 #define USBF_REG_USBSSCONF	0x1024
241 #define    USBF_SYS_DMA_AVAILABLE(_n)	(BIT(0) << (_n)) /* _n=0..15 */
242 #define    USBF_SYS_EP_AVAILABLE(_n)	(BIT(16) << (_n)) /* _n=0..15 */
243 
244 #define USBF_BASE_DMA_EPN(_n)	(0x1110 + (_n) * 0x010)
245 /* EPn DMA registers offsets from Base + USBF_BASE_DMA_EPN(n-1). n=1..15 */
246 #define     USBF_REG_DMA_EPN_DCR1	0x00
247 #define         USBF_SYS_EPN_REQEN		BIT(0)
248 #define         USBF_SYS_EPN_DIR0		BIT(1)
249 #define         USBF_SYS_EPN_SET_DMACNT(_c)	((_c) << 16)
250 #define         USBF_SYS_EPN_GET_DMACNT(_r)	(((_r) >> 16) & 0x0FF)
251 
252 #define     USBF_REG_DMA_EPN_DCR2	0x04
253 #define         USBF_SYS_EPN_MPKT(_s)		((_s) << 0)
254 #define         USBF_SYS_EPN_LMPKT(_l)		((_l) << 16)
255 
256 #define     USBF_REG_DMA_EPN_TADR	0x08
257 
258 /* USB request */
259 struct usbf_req {
260 	struct usb_request	req;
261 	struct list_head	queue;
262 	unsigned int		is_zero_sent : 1;
263 	unsigned int		is_mapped : 1;
264 	enum {
265 		USBF_XFER_START,
266 		USBF_XFER_WAIT_DMA,
267 		USBF_XFER_SEND_NULL,
268 		USBF_XFER_WAIT_END,
269 		USBF_XFER_WAIT_DMA_SHORT,
270 		USBF_XFER_WAIT_BRIDGE,
271 	}			xfer_step;
272 	size_t			dma_size;
273 };
274 
275 /* USB Endpoint */
276 struct usbf_ep {
277 	struct usb_ep		ep;
278 	char			name[32];
279 	struct list_head	queue;
280 	unsigned int		is_processing : 1;
281 	unsigned int		is_in : 1;
282 	struct usbf_udc		*udc;
283 	void __iomem		*regs;
284 	void __iomem		*dma_regs;
285 	unsigned int		id : 8;
286 	unsigned int		disabled : 1;
287 	unsigned int		is_wedged : 1;
288 	unsigned int		delayed_status : 1;
289 	u32			status;
290 	void			(*bridge_on_dma_end)(struct usbf_ep *ep);
291 };
292 
293 enum usbf_ep0state {
294 	EP0_IDLE,
295 	EP0_IN_DATA_PHASE,
296 	EP0_OUT_DATA_PHASE,
297 	EP0_OUT_STATUS_START_PHASE,
298 	EP0_OUT_STATUS_PHASE,
299 	EP0_OUT_STATUS_END_PHASE,
300 	EP0_IN_STATUS_START_PHASE,
301 	EP0_IN_STATUS_PHASE,
302 	EP0_IN_STATUS_END_PHASE,
303 };
304 
305 struct usbf_udc {
306 	struct usb_gadget		gadget;
307 	struct usb_gadget_driver	*driver;
308 	struct device			*dev;
309 	void __iomem			*regs;
310 	spinlock_t			lock;
311 	bool				is_remote_wakeup;
312 	bool				is_usb_suspended;
313 	struct usbf_ep			ep[USBF_NUM_ENDPOINTS];
314 	/* for EP0 control messages */
315 	enum usbf_ep0state		ep0state;
316 	struct usbf_req			setup_reply;
317 	u8				ep0_buf[USBF_EP0_MAX_PCKT_SIZE];
318 };
319 
320 struct usbf_ep_info {
321 	const char		*name;
322 	struct usb_ep_caps	caps;
323 	u16			base_addr;
324 	unsigned int		is_double : 1;
325 	u16			maxpacket_limit;
326 };
327 
328 #define USBF_SINGLE_BUFFER 0
329 #define USBF_DOUBLE_BUFFER 1
330 #define USBF_EP_INFO(_name, _caps, _base_addr, _is_double, _maxpacket_limit)  \
331 	{                                                                     \
332 		.name            = _name,                                     \
333 		.caps            = _caps,                                     \
334 		.base_addr       = _base_addr,                                \
335 		.is_double       = _is_double,                                \
336 		.maxpacket_limit = _maxpacket_limit,                          \
337 	}
338 
339 /* This table is computed from the recommended values provided in the SOC
340  * datasheet. The buffer type (single/double) and the endpoint type cannot
341  * be changed. The mapping in internal RAM (base_addr and number of words)
342  * for each endpoint depends on the max packet size and the buffer type.
343  */
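/* For example (illustrative arithmetic derived from the table below, not an
 * extra datasheet value): ep1 is double buffered with a 512-byte max packet
 * and starts at word 0x0020, so it occupies 512 * 2 / 4 = 256 (0x100) words
 * and ep2 starts at 0x0020 + 0x100 = 0x0120.
 */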
344 static const struct usbf_ep_info usbf_ep_info[USBF_NUM_ENDPOINTS] = {
345 	/* ep0: buf @0x0000 64 bytes, fixed 32 words */
346 	[0] = USBF_EP_INFO("ep0-ctrl",
347 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
348 				       USB_EP_CAPS_DIR_ALL),
349 			   0x0000, USBF_SINGLE_BUFFER, USBF_EP0_MAX_PCKT_SIZE),
350 	/* ep1: buf @0x0020, 2 buffers 512 bytes -> (512 * 2 / 4) words */
351 	[1] = USBF_EP_INFO("ep1-bulk",
352 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
353 				       USB_EP_CAPS_DIR_ALL),
354 			   0x0020, USBF_DOUBLE_BUFFER, 512),
355 	/* ep2: buf @0x0120, 2 buffers 512 bytes -> (512 * 2 / 4) words */
356 	[2] = USBF_EP_INFO("ep2-bulk",
357 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
358 				       USB_EP_CAPS_DIR_ALL),
359 			   0x0120, USBF_DOUBLE_BUFFER, 512),
360 	/* ep3: buf @0x0220, 1 buffer 512 bytes -> (512 * 1 / 4) words */
361 	[3] = USBF_EP_INFO("ep3-bulk",
362 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
363 				       USB_EP_CAPS_DIR_ALL),
364 			   0x0220, USBF_SINGLE_BUFFER, 512),
365 	/* ep4: buf @0x02A0, 1 buffer 512 bytes -> (512 * 1 / 4) words */
366 	[4] = USBF_EP_INFO("ep4-bulk",
367 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
368 				       USB_EP_CAPS_DIR_ALL),
369 			   0x02A0, USBF_SINGLE_BUFFER, 512),
370 	/* ep5: buf @0x0320, 1 buffer 512 bytes -> (512 * 1 / 4) words */
371 	[5] = USBF_EP_INFO("ep5-bulk",
372 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
373 				       USB_EP_CAPS_DIR_ALL),
374 			   0x0320, USBF_SINGLE_BUFFER, 512),
375 	/* ep6: buf @0x03A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
376 	[6] = USBF_EP_INFO("ep6-int",
377 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
378 				       USB_EP_CAPS_DIR_ALL),
379 			   0x03A0, USBF_SINGLE_BUFFER, 1024),
380 	/* ep7: buf @0x04A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
381 	[7] = USBF_EP_INFO("ep7-int",
382 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
383 				       USB_EP_CAPS_DIR_ALL),
384 			   0x04A0, USBF_SINGLE_BUFFER, 1024),
385 	/* ep8: buf @0x0520, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
386 	[8] = USBF_EP_INFO("ep8-int",
387 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
388 				       USB_EP_CAPS_DIR_ALL),
389 			   0x0520, USBF_SINGLE_BUFFER, 1024),
390 	/* ep9: buf @0x0620, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
391 	[9] = USBF_EP_INFO("ep9-int",
392 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
393 				       USB_EP_CAPS_DIR_ALL),
394 			   0x0620, USBF_SINGLE_BUFFER, 1024),
395 	/* ep10: buf @0x0720, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
396 	[10] = USBF_EP_INFO("ep10-iso",
397 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
398 					USB_EP_CAPS_DIR_ALL),
399 			    0x0720, USBF_DOUBLE_BUFFER, 1024),
400 	/* ep11: buf @0x0920, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
401 	[11] = USBF_EP_INFO("ep11-iso",
402 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
403 					USB_EP_CAPS_DIR_ALL),
404 			    0x0920, USBF_DOUBLE_BUFFER, 1024),
405 	/* ep12: buf @0x0B20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
406 	[12] = USBF_EP_INFO("ep12-iso",
407 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
408 					USB_EP_CAPS_DIR_ALL),
409 			    0x0B20, USBF_DOUBLE_BUFFER, 1024),
410 	/* ep13: buf @0x0D20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
411 	[13] = USBF_EP_INFO("ep13-iso",
412 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
413 					USB_EP_CAPS_DIR_ALL),
414 			    0x0D20, USBF_DOUBLE_BUFFER, 1024),
415 	/* ep14: buf @0x0F20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
416 	[14] = USBF_EP_INFO("ep14-iso",
417 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
418 					USB_EP_CAPS_DIR_ALL),
419 			    0x0F20, USBF_DOUBLE_BUFFER, 1024),
420 	/* ep15: buf @0x1120, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
421 	[15] = USBF_EP_INFO("ep15-iso",
422 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
423 					USB_EP_CAPS_DIR_ALL),
424 			    0x1120, USBF_DOUBLE_BUFFER, 1024),
425 };
426 
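/* Simple read/modify/write accessors for the EPC, endpoint and bridge DMA
 * registers. For instance (illustrative call, not taken from the datasheet):
 *   usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
 *                   USBF_USB_CONNECTB, USBF_USB_PUE2);
 * clears CONNECTB and sets PUE2 in a single register update.
 */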
427 static inline u32 usbf_reg_readl(struct usbf_udc *udc, uint offset)
428 {
429 	return readl(udc->regs + offset);
430 }
431 
432 static inline void usbf_reg_writel(struct usbf_udc *udc, uint offset, u32 val)
433 {
434 	writel(val, udc->regs + offset);
435 }
436 
437 static inline void usbf_reg_bitset(struct usbf_udc *udc, uint offset, u32 set)
438 {
439 	u32 tmp;
440 
441 	tmp = usbf_reg_readl(udc, offset);
442 	tmp |= set;
443 	usbf_reg_writel(udc, offset, tmp);
444 }
445 
446 static inline void usbf_reg_bitclr(struct usbf_udc *udc, uint offset, u32 clr)
447 {
448 	u32 tmp;
449 
450 	tmp = usbf_reg_readl(udc, offset);
451 	tmp &= ~clr;
452 	usbf_reg_writel(udc, offset, tmp);
453 }
454 
455 static inline void usbf_reg_clrset(struct usbf_udc *udc, uint offset,
456 				   u32 clr, u32 set)
457 {
458 	u32 tmp;
459 
460 	tmp = usbf_reg_readl(udc, offset);
461 	tmp &= ~clr;
462 	tmp |= set;
463 	usbf_reg_writel(udc, offset, tmp);
464 }
465 
466 static inline u32 usbf_ep_reg_readl(struct usbf_ep *ep, uint offset)
467 {
468 	return readl(ep->regs + offset);
469 }
470 
471 static inline void usbf_ep_reg_read_rep(struct usbf_ep *ep, uint offset,
472 				       void *dst, uint count)
473 {
474 	readsl(ep->regs + offset, dst, count);
475 }
476 
477 static inline void usbf_ep_reg_writel(struct usbf_ep *ep, uint offset, u32 val)
478 {
479 	writel(val, ep->regs + offset);
480 }
481 
482 static inline void usbf_ep_reg_write_rep(struct usbf_ep *ep, uint offset,
483 					 const void *src, uint count)
484 {
485 	writesl(ep->regs + offset, src, count);
486 }
487 
488 static inline void usbf_ep_reg_bitset(struct usbf_ep *ep, uint offset, u32 set)
489 {
490 	u32 tmp;
491 
492 	tmp = usbf_ep_reg_readl(ep, offset);
493 	tmp |= set;
494 	usbf_ep_reg_writel(ep, offset, tmp);
495 }
496 
497 static inline void usbf_ep_reg_bitclr(struct usbf_ep *ep, uint offset, u32 clr)
498 {
499 	u32 tmp;
500 
501 	tmp = usbf_ep_reg_readl(ep, offset);
502 	tmp &= ~clr;
503 	usbf_ep_reg_writel(ep, offset, tmp);
504 }
505 
506 static inline void usbf_ep_reg_clrset(struct usbf_ep *ep, uint offset,
507 				      u32 clr, u32 set)
508 {
509 	u32 tmp;
510 
511 	tmp = usbf_ep_reg_readl(ep, offset);
512 	tmp &= ~clr;
513 	tmp |= set;
514 	usbf_ep_reg_writel(ep, offset, tmp);
515 }
516 
517 static inline u32 usbf_ep_dma_reg_readl(struct usbf_ep *ep, uint offset)
518 {
519 	return readl(ep->dma_regs + offset);
520 }
521 
522 static inline void usbf_ep_dma_reg_writel(struct usbf_ep *ep, uint offset,
523 					  u32 val)
524 {
525 	writel(val, ep->dma_regs + offset);
526 }
527 
528 static inline void usbf_ep_dma_reg_bitset(struct usbf_ep *ep, uint offset,
529 					  u32 set)
530 {
531 	u32 tmp;
532 
533 	tmp = usbf_ep_dma_reg_readl(ep, offset);
534 	tmp |= set;
535 	usbf_ep_dma_reg_writel(ep, offset, tmp);
536 }
537 
538 static inline void usbf_ep_dma_reg_bitclr(struct usbf_ep *ep, uint offset,
539 					  u32 clr)
540 {
541 	u32 tmp;
542 
543 	tmp = usbf_ep_dma_reg_readl(ep, offset);
544 	tmp &= ~clr;
545 	usbf_ep_dma_reg_writel(ep, offset, tmp);
546 }
547 
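/* Send a zero-length packet on EP0: setting DEND with an empty FIFO makes the
 * controller transmit a null packet. PIDCLR is set as well when the caller
 * requests it via is_data1.
 */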
548 static void usbf_ep0_send_null(struct usbf_ep *ep0, bool is_data1)
549 {
550 	u32 set;
551 
552 	set = USBF_EP0_DEND;
553 	if (is_data1)
554 		set |= USBF_EP0_PIDCLR;
555 
556 	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, set);
557 }
558 
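/* PIO transmit handler for EP0: copy the next packet of the request into the
 * EP0 FIFO, full 32-bit words first and the trailing 1-3 bytes through the DW
 * field, then terminate the packet with DEND. Returns -EINPROGRESS while data
 * (or a required zero-length packet) is still pending, 0 once the request is
 * fully sent.
 */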
559 static int usbf_ep0_pio_in(struct usbf_ep *ep0, struct usbf_req *req)
560 {
561 	unsigned int left;
562 	unsigned int nb;
563 	const void *buf;
564 	u32 ctrl;
565 	u32 last;
566 
567 	left = req->req.length - req->req.actual;
568 
569 	if (left == 0) {
570 		if (!req->is_zero_sent) {
571 			if (req->req.length == 0) {
572 				dev_dbg(ep0->udc->dev, "ep0 send null\n");
573 				usbf_ep0_send_null(ep0, false);
574 				req->is_zero_sent = 1;
575 				return -EINPROGRESS;
576 			}
577 			if ((req->req.actual % ep0->ep.maxpacket) == 0) {
578 				if (req->req.zero) {
579 					dev_dbg(ep0->udc->dev, "ep0 send null\n");
580 					usbf_ep0_send_null(ep0, false);
581 					req->is_zero_sent = 1;
582 					return -EINPROGRESS;
583 				}
584 			}
585 		}
586 		return 0;
587 	}
588 
589 	if (left > ep0->ep.maxpacket)
590 		left = ep0->ep.maxpacket;
591 
592 	buf = req->req.buf;
593 	buf += req->req.actual;
594 
595 	nb = left / sizeof(u32);
596 	if (nb) {
597 		usbf_ep_reg_write_rep(ep0, USBF_REG_EP0_WRITE, buf, nb);
598 		buf += (nb * sizeof(u32));
599 		req->req.actual += (nb * sizeof(u32));
600 		left -= (nb * sizeof(u32));
601 	}
602 	ctrl = usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL);
603 	ctrl &= ~USBF_EP0_DW_MASK;
604 	if (left) {
605 		memcpy(&last, buf, left);
606 		usbf_ep_reg_writel(ep0, USBF_REG_EP0_WRITE, last);
607 		ctrl |= USBF_EP0_DW(left);
608 		req->req.actual += left;
609 	}
610 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, ctrl | USBF_EP0_DEND);
611 
612 	dev_dbg(ep0->udc->dev, "ep0 send %u/%u\n",
613 		req->req.actual, req->req.length);
614 
615 	return -EINPROGRESS;
616 }
617 
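/* PIO receive handler for EP0: drain the received packet from the EP0 FIFO
 * into the current request. Returns 0 when the request is complete (short or
 * null packet, expected length reached, or overflow recorded in req.status),
 * -EINPROGRESS otherwise.
 */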
618 static int usbf_ep0_pio_out(struct usbf_ep *ep0, struct usbf_req *req)
619 {
620 	int req_status = 0;
621 	unsigned int count;
622 	unsigned int recv;
623 	unsigned int left;
624 	unsigned int nb;
625 	void *buf;
626 	u32 last;
627 
628 	if (ep0->status & USBF_EP0_OUT_INT) {
629 		recv = usbf_ep_reg_readl(ep0, USBF_REG_EP0_LENGTH) & USBF_EP0_LDATA;
630 		count = recv;
631 
632 		buf = req->req.buf;
633 		buf += req->req.actual;
634 
635 		left = req->req.length - req->req.actual;
636 
637 		dev_dbg(ep0->udc->dev, "ep0 recv %u, left %u\n", count, left);
638 
639 		if (left > ep0->ep.maxpacket)
640 			left = ep0->ep.maxpacket;
641 
642 		if (count > left) {
643 			req_status = -EOVERFLOW;
644 			count = left;
645 		}
646 
647 		if (count) {
648 			nb = count / sizeof(u32);
649 			if (nb) {
650 				usbf_ep_reg_read_rep(ep0, USBF_REG_EP0_READ,
651 					buf, nb);
652 				buf += (nb * sizeof(u32));
653 				req->req.actual += (nb * sizeof(u32));
654 				count -= (nb * sizeof(u32));
655 			}
656 			if (count) {
657 				last = usbf_ep_reg_readl(ep0, USBF_REG_EP0_READ);
658 				memcpy(buf, &last, count);
659 				req->req.actual += count;
660 			}
661 		}
662 		dev_dbg(ep0->udc->dev, "ep0 recv %u/%u\n",
663 			req->req.actual, req->req.length);
664 
665 		if (req_status) {
666 			dev_dbg(ep0->udc->dev, "ep0 req.status=%d\n", req_status);
667 			req->req.status = req_status;
668 			return 0;
669 		}
670 
671 		if (recv < ep0->ep.maxpacket) {
672 			dev_dbg(ep0->udc->dev, "ep0 short packet\n");
673 			/* This is a short packet -> It is the end */
674 			req->req.status = 0;
675 			return 0;
676 		}
677 
678 		/* The Data stage of a control transfer from an endpoint to the
679 		 * host is complete when the endpoint does one of the following:
680 		 * - Has transferred exactly the expected amount of data
681 		 * - Transfers a packet with a payload size less than
682 		 *   wMaxPacketSize or transfers a zero-length packet
683 		 */
684 		if (req->req.actual == req->req.length) {
685 			req->req.status = 0;
686 			return 0;
687 		}
688 	}
689 
690 	if (ep0->status & USBF_EP0_OUT_NULL_INT) {
691 		/* NULL packet received */
692 		dev_dbg(ep0->udc->dev, "ep0 null packet\n");
693 		if (req->req.actual != req->req.length) {
694 			req->req.status = req->req.short_not_ok ?
695 					  -EREMOTEIO : 0;
696 		} else {
697 			req->req.status = 0;
698 		}
699 		return 0;
700 	}
701 
702 	return -EINPROGRESS;
703 }
704 
705 static void usbf_ep0_fifo_flush(struct usbf_ep *ep0)
706 {
707 	u32 sts;
708 	int ret;
709 
710 	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_BCLR);
711 
712 	ret = readl_poll_timeout_atomic(ep0->regs + USBF_REG_EP0_STATUS, sts,
713 		(sts & (USBF_EP0_IN_DATA | USBF_EP0_IN_EMPTY)) == USBF_EP0_IN_EMPTY,
714 		0,  10000);
715 	if (ret)
716 		dev_err(ep0->udc->dev, "ep0 flush fifo timed out\n");
717 
718 }
719 
720 static void usbf_epn_send_null(struct usbf_ep *epn)
721 {
722 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_DEND);
723 }
724 
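/* Send the trailing 1-3 bytes of a packet: pack them into a single 32-bit
 * FIFO write and report the number of valid bytes through the DW field before
 * terminating the packet with DEND.
 */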
725 static void usbf_epn_send_residue(struct usbf_ep *epn, const void *buf,
726 				  unsigned int size)
727 {
728 	u32 tmp;
729 
730 	memcpy(&tmp, buf, size);
731 	usbf_ep_reg_writel(epn, USBF_REG_EPN_WRITE, tmp);
732 
733 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
734 				USBF_EPN_DW_MASK,
735 				USBF_EPN_DW(size) | USBF_EPN_DEND);
736 }
737 
738 static int usbf_epn_pio_in(struct usbf_ep *epn, struct usbf_req *req)
739 {
740 	unsigned int left;
741 	unsigned int nb;
742 	const void *buf;
743 
744 	left = req->req.length - req->req.actual;
745 
746 	if (left == 0) {
747 		if (!req->is_zero_sent) {
748 			if (req->req.length == 0) {
749 				dev_dbg(epn->udc->dev, "ep%u send_null\n", epn->id);
750 				usbf_epn_send_null(epn);
751 				req->is_zero_sent = 1;
752 				return -EINPROGRESS;
753 			}
754 			if ((req->req.actual % epn->ep.maxpacket) == 0) {
755 				if (req->req.zero) {
756 					dev_dbg(epn->udc->dev, "ep%u send_null\n",
757 						epn->id);
758 					usbf_epn_send_null(epn);
759 					req->is_zero_sent = 1;
760 					return -EINPROGRESS;
761 				}
762 			}
763 		}
764 		return 0;
765 	}
766 
767 	if (left > epn->ep.maxpacket)
768 		left = epn->ep.maxpacket;
769 
770 	buf = req->req.buf;
771 	buf += req->req.actual;
772 
773 	nb = left / sizeof(u32);
774 	if (nb) {
775 		usbf_ep_reg_write_rep(epn, USBF_REG_EPN_WRITE, buf, nb);
776 		buf += (nb * sizeof(u32));
777 		req->req.actual += (nb * sizeof(u32));
778 		left -= (nb * sizeof(u32));
779 	}
780 
781 	if (left) {
782 		usbf_epn_send_residue(epn, buf, left);
783 		req->req.actual += left;
784 	} else {
785 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
786 					USBF_EPN_DW_MASK,
787 					USBF_EPN_DEND);
788 	}
789 
790 	dev_dbg(epn->udc->dev, "ep%u send %u/%u\n", epn->id, req->req.actual,
791 		req->req.length);
792 
793 	return -EINPROGRESS;
794 }
795 
796 static void usbf_epn_enable_in_end_int(struct usbf_ep *epn)
797 {
798 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_END_EN);
799 }
800 
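/* DMA transmit handler for EPn, driven by req->xfer_step:
 * USBF_XFER_START programs the bridge and endpoint DMA counters,
 * USBF_XFER_WAIT_DMA completes the DMA part and flushes any residue or short
 * packet, USBF_XFER_SEND_NULL adds a trailing zero-length packet when
 * req.zero is set, and USBF_XFER_WAIT_END waits for the final IN interrupt.
 * Buffers that are not 32-bit aligned fall back to PIO.
 * Illustrative sizing: a 1000-byte request with a 512-byte max packet gives
 * npkt = 2 and lastpkt = 488, i.e. a single 1000-byte DMA whose 488-byte tail
 * is then pushed out as a short packet.
 */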
801 static int usbf_epn_dma_in(struct usbf_ep *epn, struct usbf_req *req)
802 {
803 	unsigned int left;
804 	u32 npkt;
805 	u32 lastpkt;
806 	int ret;
807 
808 	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
809 		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
810 			epn->id);
811 		return usbf_epn_pio_in(epn, req);
812 	}
813 
814 	left = req->req.length - req->req.actual;
815 
816 	switch (req->xfer_step) {
817 	default:
818 	case USBF_XFER_START:
819 		if (left == 0) {
820 			dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
821 			usbf_epn_send_null(epn);
822 			req->xfer_step = USBF_XFER_WAIT_END;
823 			break;
824 		}
825 		if (left < 4) {
826 			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
827 				left);
828 			usbf_epn_send_residue(epn,
829 				req->req.buf + req->req.actual, left);
830 			req->req.actual += left;
831 			req->xfer_step = USBF_XFER_WAIT_END;
832 			break;
833 		}
834 
835 		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 1);
836 		if (ret < 0) {
837 			dev_err(epn->udc->dev, "usb_gadget_map_request failed (%d)\n",
838 				ret);
839 			return ret;
840 		}
841 		req->is_mapped = 1;
842 
843 		npkt = DIV_ROUND_UP(left, epn->ep.maxpacket);
844 		lastpkt = (left % epn->ep.maxpacket);
845 		if (lastpkt == 0)
846 			lastpkt = epn->ep.maxpacket;
847 		lastpkt &= ~0x3; /* DMA is done on 32bit units */
848 
849 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2,
850 			USBF_SYS_EPN_MPKT(epn->ep.maxpacket) | USBF_SYS_EPN_LMPKT(lastpkt));
851 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR,
852 			req->req.dma);
853 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
854 			USBF_SYS_EPN_SET_DMACNT(npkt));
855 		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
856 			USBF_SYS_EPN_REQEN);
857 
858 		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT, USBF_EPN_SET_DMACNT(npkt));
859 
860 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
861 
862 		/* The end of the DMA transfer at the USBF level needs to be handled
863 		 * after the detection of the end of the DMA transfer at the bridge
864 		 * level.
865 		 * To force this sequence, EPN_IN_END_EN will be set by the
866 		 * detection of the end of transfer at the bridge level (i.e. the
867 		 * bridge interrupt).
868 		 */
869 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
870 			USBF_EPN_IN_EN | USBF_EPN_IN_END_EN);
871 		epn->bridge_on_dma_end = usbf_epn_enable_in_end_int;
872 
873 		/* Clear any pending IN_END interrupt */
874 		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_END_INT);
875 
876 		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
877 			USBF_EPN_BURST_SET | USBF_EPN_DMAMODE0);
878 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
879 			USBF_EPN_DMA_EN);
880 
881 		req->dma_size = (npkt - 1) * epn->ep.maxpacket + lastpkt;
882 
883 		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu\n", epn->id,
884 			req->dma_size);
885 
886 		req->xfer_step = USBF_XFER_WAIT_DMA;
887 		break;
888 
889 	case USBF_XFER_WAIT_DMA:
890 		if (!(epn->status & USBF_EPN_IN_END_INT)) {
891 			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
892 			break;
893 		}
894 		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
895 
896 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 1);
897 		req->is_mapped = 0;
898 
899 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
900 
901 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
902 			USBF_EPN_IN_END_EN,
903 			USBF_EPN_IN_EN);
904 
905 		req->req.actual += req->dma_size;
906 
907 		left = req->req.length - req->req.actual;
908 		if (left) {
909 			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_INT);
910 
911 			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
912 				left);
913 			usbf_epn_send_residue(epn,
914 				req->req.buf + req->req.actual, left);
915 			req->req.actual += left;
916 			req->xfer_step = USBF_XFER_WAIT_END;
917 			break;
918 		}
919 
920 		if (req->req.actual % epn->ep.maxpacket) {
921 			/* last packet was a short packet. Tell the hardware to
922 			 * send it right now.
923 			 */
924 			dev_dbg(epn->udc->dev, "ep%u send short\n", epn->id);
925 			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
926 				~(u32)USBF_EPN_IN_INT);
927 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
928 				USBF_EPN_DEND);
929 
930 			req->xfer_step = USBF_XFER_WAIT_END;
931 			break;
932 		}
933 
934 		/* The last packet was a full maxpacket-sized packet.
935 		 * Send a null packet if needed.
936 		 */
937 		if (req->req.zero) {
938 			req->xfer_step = USBF_XFER_SEND_NULL;
939 			break;
940 		}
941 
942 		/* No more action to do. Wait for the end of the USB transfer */
943 		req->xfer_step = USBF_XFER_WAIT_END;
944 		break;
945 
946 	case USBF_XFER_SEND_NULL:
947 		dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
948 		usbf_epn_send_null(epn);
949 		req->xfer_step = USBF_XFER_WAIT_END;
950 		break;
951 
952 	case USBF_XFER_WAIT_END:
953 		if (!(epn->status & USBF_EPN_IN_INT)) {
954 			dev_dbg(epn->udc->dev, "ep%u end not done\n", epn->id);
955 			break;
956 		}
957 		dev_dbg(epn->udc->dev, "ep%u send done %u/%u\n", epn->id,
958 			req->req.actual, req->req.length);
959 		req->xfer_step = USBF_XFER_START;
960 		return 0;
961 	}
962 
963 	return -EINPROGRESS;
964 }
965 
966 static void usbf_epn_recv_residue(struct usbf_ep *epn, void *buf,
967 				  unsigned int size)
968 {
969 	u32 last;
970 
971 	last = usbf_ep_reg_readl(epn, USBF_REG_EPN_READ);
972 	memcpy(buf, &last, size);
973 }
974 
975 static int usbf_epn_pio_out(struct usbf_ep *epn, struct usbf_req *req)
976 {
977 	int req_status = 0;
978 	unsigned int count;
979 	unsigned int recv;
980 	unsigned int left;
981 	unsigned int nb;
982 	void *buf;
983 
984 	if (epn->status & USBF_EPN_OUT_INT) {
985 		recv = USBF_EPN_GET_LDATA(
986 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
987 		count = recv;
988 
989 		buf = req->req.buf;
990 		buf += req->req.actual;
991 
992 		left = req->req.length - req->req.actual;
993 
994 		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
995 			recv, left, epn->ep.maxpacket);
996 
997 		if (left > epn->ep.maxpacket)
998 			left = epn->ep.maxpacket;
999 
1000 		if (count > left) {
1001 			req_status = -EOVERFLOW;
1002 			count = left;
1003 		}
1004 
1005 		if (count) {
1006 			nb = count / sizeof(u32);
1007 			if (nb) {
1008 				usbf_ep_reg_read_rep(epn, USBF_REG_EPN_READ,
1009 					buf, nb);
1010 				buf += (nb * sizeof(u32));
1011 				req->req.actual += (nb * sizeof(u32));
1012 				count -= (nb * sizeof(u32));
1013 			}
1014 			if (count) {
1015 				usbf_epn_recv_residue(epn, buf, count);
1016 				req->req.actual += count;
1017 			}
1018 		}
1019 		dev_dbg(epn->udc->dev, "ep%u recv %u/%u\n", epn->id,
1020 			req->req.actual, req->req.length);
1021 
1022 		if (req_status) {
1023 			dev_dbg(epn->udc->dev, "ep%u req.status=%d\n", epn->id,
1024 				req_status);
1025 			req->req.status = req_status;
1026 			return 0;
1027 		}
1028 
1029 		if (recv < epn->ep.maxpacket) {
1030 			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
1031 			/* This is a short packet -> It is the end */
1032 			req->req.status = 0;
1033 			return 0;
1034 		}
1035 
1036 		/* Request full -> complete */
1037 		if (req->req.actual == req->req.length) {
1038 			req->req.status = 0;
1039 			return 0;
1040 		}
1041 	}
1042 
1043 	if (epn->status & USBF_EPN_OUT_NULL_INT) {
1044 		/* NULL packet received */
1045 		dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
1046 		if (req->req.actual != req->req.length) {
1047 			req->req.status = req->req.short_not_ok ?
1048 					  -EREMOTEIO : 0;
1049 		} else {
1050 			req->req.status = 0;
1051 		}
1052 		return 0;
1053 	}
1054 
1055 	return -EINPROGRESS;
1056 }
1057 
1058 static void usbf_epn_enable_out_end_int(struct usbf_ep *epn)
1059 {
1060 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_OUT_END_EN);
1061 }
1062 
1063 static void usbf_epn_process_queue(struct usbf_ep *epn);
1064 
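/* Program the AHB-EPC bridge and the endpoint registers for an OUT DMA
 * transfer: either npkt max-size packets, or a single packet when is_short is
 * set. The interrupt masks are adjusted so that the end of the transfer is
 * detected in the right order (see the comments below).
 */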
1065 static void usbf_epn_dma_out_send_dma(struct usbf_ep *epn, dma_addr_t addr, u32 npkt, bool is_short)
1066 {
1067 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2, USBF_SYS_EPN_MPKT(epn->ep.maxpacket));
1068 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR, addr);
1069 
1070 	if (is_short) {
1071 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
1072 				USBF_SYS_EPN_SET_DMACNT(1) | USBF_SYS_EPN_DIR0);
1073 		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
1074 				USBF_SYS_EPN_REQEN);
1075 
1076 		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
1077 				USBF_EPN_SET_DMACNT(0));
1078 
1079 		/* The end of the DMA transfer at the USBF level needs to be handled
1080 		 * after the detection of the end of the DMA transfer at the bridge
1081 		 * level.
1082 		 * To force this sequence, enabling the OUT_END interrupt will
1083 		 * be done by the detection of the end of transfer at the bridge
1084 		 * level (i.e. the bridge interrupt).
1085 		 */
1086 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1087 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN | USBF_EPN_OUT_END_EN);
1088 		epn->bridge_on_dma_end = usbf_epn_enable_out_end_int;
1089 
1090 		/* Clear any pending OUT_END interrupt */
1091 		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1092 			~(u32)USBF_EPN_OUT_END_INT);
1093 
1094 		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
1095 			USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0);
1096 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
1097 			USBF_EPN_DMA_EN);
1098 		return;
1099 	}
1100 
1101 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
1102 		USBF_SYS_EPN_SET_DMACNT(npkt) | USBF_SYS_EPN_DIR0);
1103 	usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
1104 		USBF_SYS_EPN_REQEN);
1105 
1106 	usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
1107 		USBF_EPN_SET_DMACNT(npkt));
1108 
1109 	/* Here, the bridge may or may not generate an interrupt to signal the
1110 	 * end of the DMA transfer.
1111 	 * Keep only the OUT_END interrupt enabled and handle the bridge later
1112 	 * during the OUT_END processing.
1113 	 */
1114 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1115 		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN,
1116 		USBF_EPN_OUT_END_EN);
1117 
1118 	/* Disable the bridge interrupt. It will be re-enabled later */
1119 	usbf_reg_bitclr(epn->udc, USBF_REG_AHBBINTEN,
1120 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1121 
1122 	/* Clear any pending DMA_END interrupt at bridge level */
1123 	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT,
1124 		USBF_SYS_DMA_ENDINT_EPN(epn->id));
1125 
1126 	/* Clear any pending OUT_END interrupt */
1127 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1128 		~(u32)USBF_EPN_OUT_END_INT);
1129 
1130 	usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
1131 		USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0 | USBF_EPN_BURST_SET);
1132 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
1133 		USBF_EPN_DMA_EN);
1134 }
1135 
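/* Finish an OUT DMA transfer: restore the endpoint interrupt mask and, for a
 * full-size transfer, re-enable the bridge interrupt. Returns the number of
 * bytes that were not transferred (remaining packet count * max packet size)
 * when the DMA was stopped early by a short or null packet, 0 otherwise.
 */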
1136 static size_t usbf_epn_dma_out_complete_dma(struct usbf_ep *epn, bool is_short)
1137 {
1138 	u32 dmacnt;
1139 	u32 tmp;
1140 	int ret;
1141 
1142 	/* Restore interrupt mask */
1143 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1144 		USBF_EPN_OUT_END_EN,
1145 		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1146 
1147 	if (is_short) {
1148 		/* Nothing more to do when the DMA was for a short packet */
1149 		return 0;
1150 	}
1151 
1152 	/* Enable the bridge interrupt */
1153 	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
1154 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1155 
1156 	tmp = usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT);
1157 	dmacnt = USBF_EPN_GET_DMACNT(tmp);
1158 
1159 	if (dmacnt) {
1160 		/* Some packets were not received (the transfer was halted by a
1161 		 * short or a null packet).
1162 		 * The bridge never raises an interrupt in this case.
1163 		 * Wait for the end of the transfer at the bridge level.
1164 		 */
1165 		ret = readl_poll_timeout_atomic(
1166 			epn->dma_regs + USBF_REG_DMA_EPN_DCR1,
1167 			tmp, (USBF_SYS_EPN_GET_DMACNT(tmp) == dmacnt),
1168 			0,  10000);
1169 		if (ret) {
1170 			dev_err(epn->udc->dev, "ep%u wait bridge timed out\n",
1171 				epn->id);
1172 		}
1173 
1174 		usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1,
1175 			USBF_SYS_EPN_REQEN);
1176 
1177 		/* The dmacnt value tells how many packets were not transferred
1178 		 * out of the maximum number of packets we set for the DMA transfer.
1179 		 * Compute the remaining DMA size based on this value.
1180 		 */
1181 		return dmacnt * epn->ep.maxpacket;
1182 	}
1183 
1184 	return 0;
1185 }
1186 
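/* DMA receive handler for EPn, driven by req->xfer_step:
 * USBF_XFER_START inspects the first received packet and either reads a tiny
 * (<= 3 byte) packet directly, starts a single-packet "short" DMA, or starts
 * a multi-packet DMA sized from the request buffer; USBF_XFER_WAIT_DMA_SHORT
 * and USBF_XFER_WAIT_DMA complete those DMAs and read any residue;
 * USBF_XFER_WAIT_BRIDGE waits for the bridge interrupt when all programmed
 * packets were received. Unaligned buffers fall back to PIO.
 * Illustrative sizing: with 1000 bytes left and a 512-byte max packet,
 * npkt = 1000 / 512 = 1, so one 512-byte packet is transferred by DMA and the
 * remainder is handled on the following interrupts.
 */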
1187 static int usbf_epn_dma_out(struct usbf_ep *epn, struct usbf_req *req)
1188 {
1189 	unsigned int dma_left;
1190 	unsigned int count;
1191 	unsigned int recv;
1192 	unsigned int left;
1193 	u32 npkt;
1194 	int ret;
1195 
1196 	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
1197 		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
1198 			epn->id);
1199 		return usbf_epn_pio_out(epn, req);
1200 	}
1201 
1202 	switch (req->xfer_step) {
1203 	default:
1204 	case USBF_XFER_START:
1205 		if (epn->status & USBF_EPN_OUT_NULL_INT) {
1206 			dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
1207 			if (req->req.actual != req->req.length) {
1208 				req->req.status = req->req.short_not_ok ?
1209 					-EREMOTEIO : 0;
1210 			} else {
1211 				req->req.status = 0;
1212 			}
1213 			return 0;
1214 		}
1215 
1216 		if (!(epn->status & USBF_EPN_OUT_INT)) {
1217 			dev_dbg(epn->udc->dev, "ep%u OUT_INT not set -> spurious\n",
1218 				epn->id);
1219 			break;
1220 		}
1221 
1222 		recv = USBF_EPN_GET_LDATA(
1223 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1224 		if (!recv) {
1225 			dev_dbg(epn->udc->dev, "ep%u recv = 0 -> spurious\n",
1226 				epn->id);
1227 			break;
1228 		}
1229 
1230 		left = req->req.length - req->req.actual;
1231 
1232 		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
1233 			recv, left, epn->ep.maxpacket);
1234 
1235 		if (recv > left) {
1236 			dev_err(epn->udc->dev, "ep%u overflow (%u/%u)\n",
1237 				epn->id, recv, left);
1238 			req->req.status = -EOVERFLOW;
1239 			return -EOVERFLOW;
1240 		}
1241 
1242 		if (recv < epn->ep.maxpacket) {
1243 			/* Short packet received */
1244 			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
1245 			if (recv <= 3) {
1246 				usbf_epn_recv_residue(epn,
1247 					req->req.buf + req->req.actual, recv);
1248 				req->req.actual += recv;
1249 
1250 				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
1251 					epn->id, req->req.actual, req->req.length);
1252 
1253 				req->xfer_step = USBF_XFER_START;
1254 				return 0;
1255 			}
1256 
1257 			ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
1258 			if (ret < 0) {
1259 				dev_err(epn->udc->dev, "map request failed (%d)\n",
1260 					ret);
1261 				return ret;
1262 			}
1263 			req->is_mapped = 1;
1264 
1265 			usbf_epn_dma_out_send_dma(epn,
1266 				req->req.dma + req->req.actual,
1267 				1, true);
1268 			req->dma_size = recv & ~0x3;
1269 
1270 			dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n", epn->id,
1271 				req->dma_size);
1272 
1273 			req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
1274 			break;
1275 		}
1276 
1277 		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
1278 		if (ret < 0) {
1279 			dev_err(epn->udc->dev, "map request failed (%d)\n",
1280 				ret);
1281 			return ret;
1282 		}
1283 		req->is_mapped = 1;
1284 
1285 		/* Use the maximum DMA size according to the request buffer.
1286 		 * The received size will be adjusted at the end of the DMA
1287 		 * transfer, using the remaining size reported by
1288 		 * usbf_epn_dma_out_complete_dma().
1289 		 */
1290 		npkt = left / epn->ep.maxpacket;
1291 		usbf_epn_dma_out_send_dma(epn,
1292 				req->req.dma + req->req.actual,
1293 				npkt, false);
1294 		req->dma_size = npkt * epn->ep.maxpacket;
1295 
1296 		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu (%u)\n", epn->id,
1297 			req->dma_size, npkt);
1298 
1299 		req->xfer_step = USBF_XFER_WAIT_DMA;
1300 		break;
1301 
1302 	case USBF_XFER_WAIT_DMA_SHORT:
1303 		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
1304 			dev_dbg(epn->udc->dev, "ep%u dma short not done\n", epn->id);
1305 			break;
1306 		}
1307 		dev_dbg(epn->udc->dev, "ep%u dma short done\n", epn->id);
1308 
1309 		usbf_epn_dma_out_complete_dma(epn, true);
1310 
1311 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1312 		req->is_mapped = 0;
1313 
1314 		req->req.actual += req->dma_size;
1315 
1316 		recv = USBF_EPN_GET_LDATA(
1317 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1318 
1319 		count = recv & 0x3;
1320 		if (count) {
1321 			dev_dbg(epn->udc->dev, "ep%u recv residue %u\n", epn->id,
1322 				count);
1323 			usbf_epn_recv_residue(epn,
1324 				req->req.buf + req->req.actual, count);
1325 			req->req.actual += count;
1326 		}
1327 
1328 		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1329 			req->req.actual, req->req.length);
1330 
1331 		req->xfer_step = USBF_XFER_START;
1332 		return 0;
1333 
1334 	case USBF_XFER_WAIT_DMA:
1335 		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
1336 			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
1337 			break;
1338 		}
1339 		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
1340 
1341 		dma_left = usbf_epn_dma_out_complete_dma(epn, false);
1342 		if (dma_left) {
1343 			/* Adjust the final DMA size with the left (untransferred) size */
1344 			count = req->dma_size - dma_left;
1345 
1346 			dev_dbg(epn->udc->dev, "ep%u dma xfer done %u\n", epn->id,
1347 				count);
1348 
1349 			req->req.actual += count;
1350 
1351 			if (epn->status & USBF_EPN_OUT_NULL_INT) {
1352 				/* DMA was stopped by a null packet reception */
1353 				dev_dbg(epn->udc->dev, "ep%u dma stopped by null pckt\n",
1354 					epn->id);
1355 				usb_gadget_unmap_request(&epn->udc->gadget,
1356 							 &req->req, 0);
1357 				req->is_mapped = 0;
1358 
1359 				usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1360 					~(u32)USBF_EPN_OUT_NULL_INT);
1361 
1362 				if (req->req.actual != req->req.length) {
1363 					req->req.status = req->req.short_not_ok ?
1364 						  -EREMOTEIO : 0;
1365 				} else {
1366 					req->req.status = 0;
1367 				}
1368 				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
1369 					epn->id, req->req.actual, req->req.length);
1370 				req->xfer_step = USBF_XFER_START;
1371 				return 0;
1372 			}
1373 
1374 			recv = USBF_EPN_GET_LDATA(
1375 				usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1376 			left = req->req.length - req->req.actual;
1377 			if (recv > left) {
1378 				dev_err(epn->udc->dev,
1379 					"ep%u overflow (%u/%u)\n", epn->id,
1380 					recv, left);
1381 				req->req.status = -EOVERFLOW;
1382 				usb_gadget_unmap_request(&epn->udc->gadget,
1383 							 &req->req, 0);
1384 				req->is_mapped = 0;
1385 
1386 				req->xfer_step = USBF_XFER_START;
1387 				return -EOVERFLOW;
1388 			}
1389 
1390 			if (recv > 3) {
1391 				usbf_epn_dma_out_send_dma(epn,
1392 					req->req.dma + req->req.actual,
1393 					1, true);
1394 				req->dma_size = recv & ~0x3;
1395 
1396 				dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n",
1397 					epn->id, req->dma_size);
1398 
1399 				req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
1400 				break;
1401 			}
1402 
1403 			usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1404 			req->is_mapped = 0;
1405 
1406 			count = recv & 0x3;
1407 			if (count) {
1408 				dev_dbg(epn->udc->dev, "ep%u recv residue %u\n",
1409 					epn->id, count);
1410 				usbf_epn_recv_residue(epn,
1411 					req->req.buf + req->req.actual, count);
1412 				req->req.actual += count;
1413 			}
1414 
1415 			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1416 				req->req.actual, req->req.length);
1417 
1418 			req->xfer_step = USBF_XFER_START;
1419 			return 0;
1420 		}
1421 
1422 		/* Process queue at bridge interrupt only */
1423 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1424 			USBF_EPN_OUT_END_EN | USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1425 		epn->status = 0;
1426 		epn->bridge_on_dma_end = usbf_epn_process_queue;
1427 
1428 		req->xfer_step = USBF_XFER_WAIT_BRIDGE;
1429 		break;
1430 
1431 	case USBF_XFER_WAIT_BRIDGE:
1432 		dev_dbg(epn->udc->dev, "ep%u bridge transfers done\n", epn->id);
1433 
1434 		/* Restore interrupt mask */
1435 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1436 			USBF_EPN_OUT_END_EN,
1437 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1438 
1439 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1440 		req->is_mapped = 0;
1441 
1442 		req->req.actual += req->dma_size;
1443 
1444 		req->xfer_step = USBF_XFER_START;
1445 		left = req->req.length - req->req.actual;
1446 		if (!left) {
1447 			/* No more data can be added to the buffer */
1448 			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1449 				req->req.actual, req->req.length);
1450 			return 0;
1451 		}
1452 		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u, wait more data\n",
1453 			epn->id, req->req.actual, req->req.length);
1454 		break;
1455 	}
1456 
1457 	return -EINPROGRESS;
1458 }
1459 
1460 static void usbf_epn_dma_stop(struct usbf_ep *epn)
1461 {
1462 	usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1, USBF_SYS_EPN_REQEN);
1463 
1464 	/* In the datasheet:
1465 	 *   If EP[m]_REQEN = 0b is set during DMA transfer, AHB-EPC stops DMA
1466 	 *   after 1 packet transfer completed.
1467 	 *   Therefore, wait sufficient time for ensuring DMA transfer
1468 	 *   completion. The WAIT time depends on the system, especially AHB
1469 	 *   bus activity.
1470 	 * So an arbitrary 10ms should be sufficient.
1471 	 */
1472 	mdelay(10);
1473 
1474 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_DMA_CTRL, USBF_EPN_DMA_EN);
1475 }
1476 
1477 static void usbf_epn_dma_abort(struct usbf_ep *epn,  struct usbf_req *req)
1478 {
1479 	dev_dbg(epn->udc->dev, "ep%u %s dma abort\n", epn->id,
1480 		epn->is_in ? "in" : "out");
1481 
1482 	epn->bridge_on_dma_end = NULL;
1483 
1484 	usbf_epn_dma_stop(epn);
1485 
1486 	usb_gadget_unmap_request(&epn->udc->gadget, &req->req,
1487 				 epn->is_in ? 1 : 0);
1488 	req->is_mapped = 0;
1489 
1490 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
1491 
1492 	if (epn->is_in) {
1493 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1494 			USBF_EPN_IN_END_EN,
1495 			USBF_EPN_IN_EN);
1496 	} else {
1497 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1498 			USBF_EPN_OUT_END_EN,
1499 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1500 	}
1501 
1502 	/* As DMA is stopped, be sure that no DMA interrupts are pending */
1503 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1504 		USBF_EPN_IN_END_INT | USBF_EPN_OUT_END_INT);
1505 
1506 	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT, USBF_SYS_DMA_ENDINT_EPN(epn->id));
1507 
1508 	/* Enable the DMA interrupt at the bridge level */
1509 	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
1510 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1511 
1512 	/* Reset transfer step */
1513 	req->xfer_step = USBF_XFER_START;
1514 }
1515 
1516 static void usbf_epn_fifo_flush(struct usbf_ep *epn)
1517 {
1518 	u32 ctrl;
1519 	u32 sts;
1520 	int ret;
1521 
1522 	dev_dbg(epn->udc->dev, "ep%u %s fifo flush\n", epn->id,
1523 		epn->is_in ? "in" : "out");
1524 
1525 	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
1526 	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl | USBF_EPN_BCLR);
1527 
1528 	if (ctrl & USBF_EPN_DIR0)
1529 		return;
1530 
1531 	ret = readl_poll_timeout_atomic(epn->regs + USBF_REG_EPN_STATUS, sts,
1532 		(sts & (USBF_EPN_IN_DATA | USBF_EPN_IN_EMPTY)) == USBF_EPN_IN_EMPTY,
1533 		0,  10000);
1534 	if (ret)
1535 		dev_err(epn->udc->dev, "ep%u flush fifo timed out\n", epn->id);
1536 }
1537 
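/* Complete a request: remove it from the endpoint queue, set its final
 * status, abort any DMA mapping still active for it and give it back to the
 * gadget driver with the UDC lock released around the callback.
 */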
1538 static void usbf_ep_req_done(struct usbf_ep *ep, struct usbf_req *req,
1539 			     int status)
1540 {
1541 	list_del_init(&req->queue);
1542 
1543 	if (status) {
1544 		req->req.status = status;
1545 	} else {
1546 		if (req->req.status == -EINPROGRESS)
1547 			req->req.status = status;
1548 	}
1549 
1550 	dev_dbg(ep->udc->dev, "ep%u %s req done length %u/%u, status=%d\n", ep->id,
1551 		ep->is_in ? "in" : "out",
1552 		req->req.actual, req->req.length, req->req.status);
1553 
1554 	if (req->is_mapped)
1555 		usbf_epn_dma_abort(ep, req);
1556 
1557 	spin_unlock(&ep->udc->lock);
1558 	usb_gadget_giveback_request(&ep->ep, &req->req);
1559 	spin_lock(&ep->udc->lock);
1560 }
1561 
1562 static void usbf_ep_nuke(struct usbf_ep *ep, int status)
1563 {
1564 	struct usbf_req *req;
1565 
1566 	dev_dbg(ep->udc->dev, "ep%u %s nuke status %d\n", ep->id,
1567 		ep->is_in ? "in" : "out",
1568 		status);
1569 
1570 	while (!list_empty(&ep->queue)) {
1571 		req = list_first_entry(&ep->queue, struct usbf_req, queue);
1572 		usbf_ep_req_done(ep, req, status);
1573 	}
1574 
1575 	if (ep->id == 0)
1576 		usbf_ep0_fifo_flush(ep);
1577 	else
1578 		usbf_epn_fifo_flush(ep);
1579 }
1580 
1581 static bool usbf_ep_is_stalled(struct usbf_ep *ep)
1582 {
1583 	u32 ctrl;
1584 
1585 	if (ep->id == 0) {
1586 		ctrl = usbf_ep_reg_readl(ep, USBF_REG_EP0_CONTROL);
1587 		return (ctrl & USBF_EP0_STL) ? true : false;
1588 	}
1589 
1590 	ctrl = usbf_ep_reg_readl(ep, USBF_REG_EPN_CONTROL);
1591 	if (ep->is_in)
1592 		return (ctrl & USBF_EPN_ISTL) ? true : false;
1593 
1594 	return (ctrl & USBF_EPN_OSTL) ? true : false;
1595 }
1596 
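/* Kick the endpoint queue: for IN endpoints, start the first queued request
 * (DMA when available, PIO otherwise); for OUT endpoints, accept OUT tokens
 * and enable interrupts only when a request is queued, otherwise NAK incoming
 * data.
 */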
1597 static int usbf_epn_start_queue(struct usbf_ep *epn)
1598 {
1599 	struct usbf_req *req;
1600 	int ret;
1601 
1602 	if (usbf_ep_is_stalled(epn))
1603 		return 0;
1604 
1605 	req = list_first_entry_or_null(&epn->queue, struct usbf_req, queue);
1606 
1607 	if (epn->is_in) {
1608 		if (req && !epn->is_processing) {
1609 			ret = epn->dma_regs ?
1610 				usbf_epn_dma_in(epn, req) :
1611 				usbf_epn_pio_in(epn, req);
1612 			if (ret != -EINPROGRESS) {
1613 				dev_err(epn->udc->dev,
1614 					"queued next request not in progress\n");
1615 					/* The request cannot be completed (i.e.
1616 					 * ret == 0) on the first call.
1617 					 * Stall and nuke the endpoint.
1618 					 */
1619 				return ret ? ret : -EIO;
1620 			}
1621 		}
1622 	} else {
1623 		if (req) {
1624 			/* Clear ONAK to accept OUT tokens */
1625 			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL,
1626 				USBF_EPN_ONAK);
1627 
1628 			/* Enable interrupts */
1629 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA,
1630 				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1631 		} else {
1632 			/* Disable incoming data and interrupts.
1633 			 * They will be enabled on the next usb_ep_queue call
1634 			 */
1635 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
1636 				USBF_EPN_ONAK);
1637 			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1638 				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1639 		}
1640 	}
1641 	return 0;
1642 }
1643 
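/* Run the endpoint queue from interrupt context: select the PIO or DMA
 * handler matching the endpoint, complete every request the handler finishes
 * and re-arm or NAK OUT endpoints depending on whether another request is
 * queued. Returns -EINPROGRESS while a request is still being transferred.
 */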
1644 static int usbf_ep_process_queue(struct usbf_ep *ep)
1645 {
1646 	int (*usbf_ep_xfer)(struct usbf_ep *ep, struct usbf_req *req);
1647 	struct usbf_req *req;
1648 	int is_processing;
1649 	int ret;
1650 
1651 	if (ep->is_in) {
1652 		usbf_ep_xfer = usbf_ep0_pio_in;
1653 		if (ep->id) {
1654 			usbf_ep_xfer = ep->dma_regs ?
1655 					usbf_epn_dma_in : usbf_epn_pio_in;
1656 		}
1657 	} else {
1658 		usbf_ep_xfer = usbf_ep0_pio_out;
1659 		if (ep->id) {
1660 			usbf_ep_xfer = ep->dma_regs ?
1661 					usbf_epn_dma_out : usbf_epn_pio_out;
1662 		}
1663 	}
1664 
1665 	req = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
1666 	if (!req) {
1667 		dev_err(ep->udc->dev,
1668 			"no request available for ep%u %s process\n", ep->id,
1669 			ep->is_in ? "in" : "out");
1670 		return -ENOENT;
1671 	}
1672 
1673 	do {
1674 		/* We're going to read the FIFO for the current request.
1675 		 * NAK any other incoming data to avoid a race condition if no
1676 		 * more requests are available.
1677 		 */
1678 		if (!ep->is_in && ep->id != 0) {
1679 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1680 				USBF_EPN_ONAK);
1681 		}
1682 
1683 		ret = usbf_ep_xfer(ep, req);
1684 		if (ret == -EINPROGRESS) {
1685 			if (!ep->is_in && ep->id != 0) {
1686 				/* The current request needs more data.
1687 				 * Allow incoming data
1688 				 */
1689 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
1690 					USBF_EPN_ONAK);
1691 			}
1692 			return ret;
1693 		}
1694 
1695 		is_processing = ep->is_processing;
1696 		ep->is_processing = 1;
1697 		usbf_ep_req_done(ep, req, ret);
1698 		ep->is_processing = is_processing;
1699 
1700 		if (ret) {
1701 			/* An error was detected during the request transfer.
1702 			 * Any pending DMA transfers were aborted by the
1703 			 * usbf_ep_req_done() call.
1704 			 * It's time to flush the fifo
1705 			 */
1706 			if (ep->id == 0)
1707 				usbf_ep0_fifo_flush(ep);
1708 			else
1709 				usbf_epn_fifo_flush(ep);
1710 		}
1711 
1712 		req = list_first_entry_or_null(&ep->queue, struct usbf_req,
1713 					       queue);
1714 
1715 		if (ep->is_in)
1716 			continue;
1717 
1718 		if (ep->id != 0) {
1719 			if (req) {
1720 				/* Another request is available.
1721 				 * Allow incoming data.
1722 				 */
1723 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
1724 					USBF_EPN_ONAK);
1725 			} else {
1726 				/* No request queued. Disable interrupts.
1727 				 * They will be enabled on usb_ep_queue
1728 				 */
1729 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_INT_ENA,
1730 					USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1731 			}
1732 		}
1733 		/* Do not call usbf_ep_xfer() again */
1734 		return req ? -EINPROGRESS : 0;
1735 
1736 	} while (req);
1737 
1738 	return 0;
1739 }
1740 
1741 static void usbf_ep_stall(struct usbf_ep *ep, bool stall)
1742 {
1743 	struct usbf_req *first;
1744 
1745 	dev_dbg(ep->udc->dev, "ep%u %s %s\n", ep->id,
1746 		ep->is_in ? "in" : "out",
1747 		stall ? "stall" : "unstall");
1748 
1749 	if (ep->id == 0) {
1750 		if (stall)
1751 			usbf_ep_reg_bitset(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
1752 		else
1753 			usbf_ep_reg_bitclr(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
1754 		return;
1755 	}
1756 
1757 	if (stall) {
1758 		if (ep->is_in)
1759 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1760 				USBF_EPN_ISTL);
1761 		else
1762 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1763 				USBF_EPN_OSTL | USBF_EPN_OSTL_EN);
1764 	} else {
1765 		first = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
1766 		if (first && first->is_mapped) {
			/* This can happen if the host halts an endpoint using
			 * SET_FEATURE and then un-halts the endpoint
			 */
1770 			usbf_epn_dma_abort(ep, first);
1771 		}
1772 		usbf_epn_fifo_flush(ep);
1773 		if (ep->is_in) {
1774 			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
1775 				USBF_EPN_ISTL,
1776 				USBF_EPN_IPIDCLR);
1777 		} else {
1778 			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
1779 				USBF_EPN_OSTL,
1780 				USBF_EPN_OSTL_EN | USBF_EPN_OPIDCLR);
1781 		}
1782 		usbf_epn_start_queue(ep);
1783 	}
1784 }
1785 
1786 static void usbf_ep0_enable(struct usbf_ep *ep0)
1787 {
1788 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_INAK_EN | USBF_EP0_BCLR);
1789 
1790 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_INT_ENA,
1791 		USBF_EP0_SETUP_EN | USBF_EP0_STG_START_EN | USBF_EP0_STG_END_EN |
1792 		USBF_EP0_OUT_EN | USBF_EP0_OUT_NULL_EN | USBF_EP0_IN_EN);
1793 
1794 	ep0->udc->ep0state = EP0_IDLE;
1795 	ep0->disabled = 0;
1796 
1797 	/* enable interrupts for the ep0 */
1798 	usbf_reg_bitset(ep0->udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(0));
1799 }
1800 
1801 static int usbf_epn_enable(struct usbf_ep *epn)
1802 {
1803 	u32 base_addr;
1804 	u32 ctrl;
1805 
1806 	base_addr = usbf_ep_info[epn->id].base_addr;
1807 	usbf_ep_reg_writel(epn, USBF_REG_EPN_PCKT_ADRS,
1808 		USBF_EPN_BASEAD(base_addr) | USBF_EPN_MPKT(epn->ep.maxpacket));
1809 
	/* OUT transfer interrupts are enabled during usb_ep_queue */
1811 	if (epn->is_in) {
1812 		/* Will be changed in DMA processing */
1813 		usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_EN);
1814 	}
1815 
	/* Clear the buffer, set the endpoint direction, set the IN/OUT STALL
	 * bits and enable the endpoint.
	 * Send NAK for OUT data as no requests are queued yet
	 */
1819 	ctrl = USBF_EPN_EN | USBF_EPN_BCLR;
1820 	if (epn->is_in)
1821 		ctrl |= USBF_EPN_OSTL | USBF_EPN_OSTL_EN;
1822 	else
1823 		ctrl |= USBF_EPN_DIR0 | USBF_EPN_ISTL | USBF_EPN_OSTL_EN | USBF_EPN_ONAK;
1824 	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl);
1825 
1826 	return 0;
1827 }
1828 
1829 static int usbf_ep_enable(struct usb_ep *_ep,
1830 			  const struct usb_endpoint_descriptor *desc)
1831 {
1832 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1833 	struct usbf_udc *udc = ep->udc;
1834 	unsigned long flags;
1835 	int ret;
1836 
1837 	if (ep->id == 0)
1838 		return -EINVAL;
1839 
1840 	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
1841 		return -EINVAL;
1842 
1843 	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
1844 		usb_endpoint_dir_in(desc) ? "in" : "out",
1845 		usb_endpoint_maxp(desc));
1846 
1847 	spin_lock_irqsave(&ep->udc->lock, flags);
1848 	ep->is_in = usb_endpoint_dir_in(desc);
1849 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1850 
1851 	ret = usbf_epn_enable(ep);
1852 	if (ret)
1853 		goto end;
1854 
1855 	ep->disabled = 0;
1856 
1857 	/* enable interrupts for this endpoint */
1858 	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
1859 
1860 	/* enable DMA interrupt at bridge level if DMA is used */
1861 	if (ep->dma_regs) {
1862 		ep->bridge_on_dma_end = NULL;
1863 		usbf_reg_bitset(udc, USBF_REG_AHBBINTEN,
1864 			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
1865 	}
1866 
1867 	ret = 0;
1868 end:
1869 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1870 	return ret;
1871 }
1872 
1873 static int usbf_epn_disable(struct usbf_ep *epn)
1874 {
1875 	/* Disable interrupts */
1876 	usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, 0);
1877 
1878 	/* Disable endpoint */
1879 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_EN);
1880 
1881 	/* remove anything that was pending */
1882 	usbf_ep_nuke(epn, -ESHUTDOWN);
1883 
1884 	return 0;
1885 }
1886 
1887 static int usbf_ep_disable(struct usb_ep *_ep)
1888 {
1889 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1890 	struct usbf_udc *udc = ep->udc;
1891 	unsigned long flags;
1892 	int ret;
1893 
1894 	if (ep->id == 0)
1895 		return -EINVAL;
1896 
1897 	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
1898 		ep->is_in ? "in" : "out", ep->ep.maxpacket);
1899 
1900 	spin_lock_irqsave(&ep->udc->lock, flags);
1901 	ep->disabled = 1;
1902 	/* Disable DMA interrupt */
1903 	if (ep->dma_regs) {
1904 		usbf_reg_bitclr(udc, USBF_REG_AHBBINTEN,
1905 			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
1906 		ep->bridge_on_dma_end = NULL;
1907 	}
1908 	/* disable interrupts for this endpoint */
1909 	usbf_reg_bitclr(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
1910 	/* and the endpoint itself */
1911 	ret = usbf_epn_disable(ep);
1912 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1913 
1914 	return ret;
1915 }
1916 
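/* Queue a request on ep0. While an IN status stage is starting or for the OUT
 * direction the request is simply appended. A request queued during the IN
 * status phase must be zero-length and clears the delayed status flag.
 * Otherwise, the IN transfer is started right away by PIO unless ep0 is
 * already being processed.
 */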
1917 static int usbf_ep0_queue(struct usbf_ep *ep0, struct usbf_req *req,
1918 			  gfp_t gfp_flags)
1919 {
1920 	int ret;
1921 
1922 	req->req.actual = 0;
1923 	req->req.status = -EINPROGRESS;
1924 	req->is_zero_sent = 0;
1925 
1926 	list_add_tail(&req->queue, &ep0->queue);
1927 
1928 	if (ep0->udc->ep0state == EP0_IN_STATUS_START_PHASE)
1929 		return 0;
1930 
1931 	if (!ep0->is_in)
1932 		return 0;
1933 
1934 	if (ep0->udc->ep0state == EP0_IN_STATUS_PHASE) {
1935 		if (req->req.length) {
1936 			dev_err(ep0->udc->dev,
				"request length %u for ep0 in status phase\n",
1938 				req->req.length);
1939 			return -EINVAL;
1940 		}
1941 		ep0->delayed_status = 0;
1942 	}
1943 	if (!ep0->is_processing) {
1944 		ret = usbf_ep0_pio_in(ep0, req);
1945 		if (ret != -EINPROGRESS) {
1946 			dev_err(ep0->udc->dev,
1947 				"queued request not in progress\n");
			/* The request cannot be completed (i.e. ret == 0) on
			 * the first call
			 */
1951 			return ret ? ret : -EIO;
1952 		}
1953 	}
1954 
1955 	return 0;
1956 }
1957 
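/* Queue a request on a regular endpoint. Queuing on a disabled endpoint is
 * refused with -ESHUTDOWN. The hardware is only kicked (usbf_epn_start_queue)
 * when the queue was empty; otherwise the request will be picked up once the
 * previous ones complete.
 */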
1958 static int usbf_epn_queue(struct usbf_ep *ep, struct usbf_req *req,
1959 			  gfp_t gfp_flags)
1960 {
1961 	int was_empty;
1962 	int ret;
1963 
1964 	if (ep->disabled) {
		dev_err(ep->udc->dev, "ep%u request queued while disabled\n",
1966 			ep->id);
1967 		return -ESHUTDOWN;
1968 	}
1969 
1970 	req->req.actual = 0;
1971 	req->req.status = -EINPROGRESS;
1972 	req->is_zero_sent = 0;
1973 	req->xfer_step = USBF_XFER_START;
1974 
1975 	was_empty = list_empty(&ep->queue);
1976 	list_add_tail(&req->queue, &ep->queue);
1977 	if (was_empty) {
1978 		ret = usbf_epn_start_queue(ep);
1979 		if (ret)
1980 			return ret;
1981 	}
1982 	return 0;
1983 }
1984 
1985 static int usbf_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1986 			 gfp_t gfp_flags)
1987 {
1988 	struct usbf_req *req = container_of(_req, struct usbf_req, req);
1989 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1990 	struct usbf_udc *udc = ep->udc;
1991 	unsigned long flags;
1992 	int ret;
1993 
1994 	if (!_req || !_req->buf)
1995 		return -EINVAL;
1996 
1997 	if (!udc || !udc->driver)
1998 		return -EINVAL;
1999 
2000 	dev_dbg(ep->udc->dev, "ep%u %s req queue length %u, zero %u, short_not_ok %u\n",
2001 		ep->id, ep->is_in ? "in" : "out",
2002 		req->req.length, req->req.zero, req->req.short_not_ok);
2003 
2004 	spin_lock_irqsave(&ep->udc->lock, flags);
2005 	if (ep->id == 0)
2006 		ret = usbf_ep0_queue(ep, req, gfp_flags);
2007 	else
2008 		ret = usbf_epn_queue(ep, req, gfp_flags);
2009 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2010 	return ret;
2011 }
2012 
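/* Dequeue a request: it is completed with -ECONNRESET. If it was at the head
 * of the queue the FIFO is flushed since the transfer may already have been
 * started. On ep0 the control transfer is no longer coherent, so the endpoint
 * is stalled and nuked and the state machine returns to EP0_IDLE; on other
 * endpoints the queue is simply restarted.
 */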
2013 static int usbf_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
2014 {
2015 	struct usbf_req *req = container_of(_req, struct usbf_req, req);
2016 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2017 	unsigned long flags;
2018 	int is_processing;
2019 	int first;
2020 	int ret;
2021 
2022 	spin_lock_irqsave(&ep->udc->lock, flags);
2023 
2024 	dev_dbg(ep->udc->dev, "ep%u %s req dequeue length %u/%u\n",
2025 		ep->id, ep->is_in ? "in" : "out",
2026 		req->req.actual, req->req.length);
2027 
2028 	first = list_is_first(&req->queue, &ep->queue);
2029 
	/* Complete the request but avoid any operation that would otherwise
	 * be triggered if a new request were queued during the completion
	 */
2033 	is_processing = ep->is_processing;
2034 	ep->is_processing = 1;
2035 	usbf_ep_req_done(ep, req, -ECONNRESET);
2036 	ep->is_processing = is_processing;
2037 
2038 	if (first) {
		/* The first item in the list was dequeued.
		 * This item may already have been submitted to the hardware.
		 * So, flush the fifo
		 */
2043 		if (ep->id)
2044 			usbf_epn_fifo_flush(ep);
2045 		else
2046 			usbf_ep0_fifo_flush(ep);
2047 	}
2048 
2049 	if (ep->id == 0) {
		/* We dequeue a request on ep0. On this endpoint, we can have
		 * 1 request related to the data stage and/or 1 request
		 * related to the status stage.
		 * We dequeue one of them and so the USB control transaction
		 * is no longer coherent. The simple way to be consistent after
		 * dequeuing is to stall and nuke the endpoint and wait for the
		 * next SETUP packet.
		 */
2058 		usbf_ep_stall(ep, true);
2059 		usbf_ep_nuke(ep, -ECONNRESET);
2060 		ep->udc->ep0state = EP0_IDLE;
2061 		goto end;
2062 	}
2063 
2064 	if (!first)
2065 		goto end;
2066 
2067 	ret = usbf_epn_start_queue(ep);
2068 	if (ret) {
2069 		usbf_ep_stall(ep, true);
2070 		usbf_ep_nuke(ep, -EIO);
2071 	}
2072 end:
2073 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2074 	return 0;
2075 }
2076 
2077 static struct usb_request *usbf_ep_alloc_request(struct usb_ep *_ep,
2078 						 gfp_t gfp_flags)
2079 {
2080 	struct usbf_req *req;
2081 
2082 	if (!_ep)
2083 		return NULL;
2084 
2085 	req = kzalloc(sizeof(*req), gfp_flags);
2086 	if (!req)
2087 		return NULL;
2088 
2089 	INIT_LIST_HEAD(&req->queue);
2090 
2091 	return &req->req;
2092 }
2093 
2094 static void usbf_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
2095 {
2096 	struct usbf_req *req;
2097 	unsigned long flags;
2098 	struct usbf_ep *ep;
2099 
2100 	if (!_ep || !_req)
2101 		return;
2102 
2103 	req = container_of(_req, struct usbf_req, req);
2104 	ep = container_of(_ep, struct usbf_ep, ep);
2105 
2106 	spin_lock_irqsave(&ep->udc->lock, flags);
2107 	list_del_init(&req->queue);
2108 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2109 	kfree(req);
2110 }
2111 
2112 static int usbf_ep_set_halt(struct usb_ep *_ep, int halt)
2113 {
2114 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2115 	unsigned long flags;
2116 	int ret;
2117 
2118 	if (ep->id == 0)
2119 		return -EINVAL;
2120 
2121 	spin_lock_irqsave(&ep->udc->lock, flags);
2122 
2123 	if (!list_empty(&ep->queue)) {
2124 		ret = -EAGAIN;
2125 		goto end;
2126 	}
2127 
2128 	usbf_ep_stall(ep, halt);
2129 	if (!halt)
2130 		ep->is_wedged = 0;
2131 
2132 	ret = 0;
2133 end:
2134 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2135 
2136 	return ret;
2137 }
2138 
2139 static int usbf_ep_set_wedge(struct usb_ep *_ep)
2140 {
2141 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2142 	unsigned long flags;
2143 	int ret;
2144 
2145 	if (ep->id == 0)
2146 		return -EINVAL;
2147 
2148 	spin_lock_irqsave(&ep->udc->lock, flags);
2149 	if (!list_empty(&ep->queue)) {
2150 		ret = -EAGAIN;
2151 		goto end;
2152 	}
2153 	usbf_ep_stall(ep, 1);
2154 	ep->is_wedged = 1;
2155 
2156 	ret = 0;
2157 end:
2158 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2159 	return ret;
2160 }
2161 
static const struct usb_ep_ops usbf_ep_ops = {
2163 	.enable = usbf_ep_enable,
2164 	.disable = usbf_ep_disable,
2165 	.queue = usbf_ep_queue,
2166 	.dequeue = usbf_ep_dequeue,
2167 	.set_halt = usbf_ep_set_halt,
2168 	.set_wedge = usbf_ep_set_wedge,
2169 	.alloc_request = usbf_ep_alloc_request,
2170 	.free_request = usbf_ep_free_request,
2171 };
2172 
2173 static void usbf_ep0_req_complete(struct usb_ep *_ep, struct usb_request *_req)
2174 {
2175 }
2176 
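/* Fill the internal 'setup_reply' request used for locally generated ep0
 * replies (GET_STATUS data, SET_ADDRESS status, zero-length status stages).
 * The payload, if any, is copied into the udc ep0_buf buffer.
 */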
2177 static void usbf_ep0_fill_req(struct usbf_ep *ep0, struct usbf_req *req,
2178 			      void *buf, unsigned int length,
2179 			      void (*complete)(struct usb_ep *_ep,
2180 					       struct usb_request *_req))
2181 {
2182 	if (buf && length)
2183 		memcpy(ep0->udc->ep0_buf, buf, length);
2184 
2185 	req->req.buf = ep0->udc->ep0_buf;
2186 	req->req.length = length;
2187 	req->req.dma = 0;
2188 	req->req.zero = true;
2189 	req->req.complete = complete ? complete : usbf_ep0_req_complete;
2190 	req->req.status = -EINPROGRESS;
2191 	req->req.context = NULL;
2192 	req->req.actual = 0;
2193 }
2194 
2195 static struct usbf_ep *usbf_get_ep_by_addr(struct usbf_udc *udc, u8 address)
2196 {
2197 	struct usbf_ep *ep;
2198 	unsigned int i;
2199 
2200 	if ((address & USB_ENDPOINT_NUMBER_MASK) == 0)
2201 		return &udc->ep[0];
2202 
2203 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
2204 		ep = &udc->ep[i];
2205 
2206 		if (!ep->ep.desc)
2207 			continue;
2208 
2209 		if (ep->ep.desc->bEndpointAddress == address)
2210 			return ep;
2211 	}
2212 
2213 	return NULL;
2214 }
2215 
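/* Forward a control request to the gadget driver's setup() callback with the
 * udc lock released. A USB_GADGET_DELAYED_STATUS return is recorded so that
 * the status stage is held back until the gadget driver queues it.
 */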
2216 static int usbf_req_delegate(struct usbf_udc *udc,
2217 			     const struct usb_ctrlrequest *ctrlrequest)
2218 {
2219 	int ret;
2220 
2221 	spin_unlock(&udc->lock);
2222 	ret = udc->driver->setup(&udc->gadget, ctrlrequest);
2223 	spin_lock(&udc->lock);
2224 	if (ret < 0) {
2225 		dev_dbg(udc->dev, "udc driver setup failed %d\n", ret);
2226 		return ret;
2227 	}
2228 	if (ret == USB_GADGET_DELAYED_STATUS) {
2229 		dev_dbg(udc->dev, "delayed status set\n");
2230 		udc->ep[0].delayed_status = 1;
2231 		return 0;
2232 	}
2233 	return ret;
2234 }
2235 
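/* Handle the standard GET_STATUS request for the device, endpoint and
 * interface recipients and reply directly through the setup_reply request.
 * Malformed or non-standard variants are delegated to the gadget driver.
 */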
2236 static int usbf_req_get_status(struct usbf_udc *udc,
2237 			       const struct usb_ctrlrequest *ctrlrequest)
2238 {
2239 	struct usbf_ep *ep;
2240 	u16 status_data;
2241 	u16 wLength;
2242 	u16 wValue;
2243 	u16 wIndex;
2244 
2245 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2246 	wLength = le16_to_cpu(ctrlrequest->wLength);
2247 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2248 
2249 	switch (ctrlrequest->bRequestType) {
2250 	case USB_DIR_IN | USB_RECIP_DEVICE | USB_TYPE_STANDARD:
2251 		if ((wValue != 0) || (wIndex != 0) || (wLength != 2))
2252 			goto delegate;
2253 
2254 		status_data = 0;
2255 		if (udc->gadget.is_selfpowered)
2256 			status_data |= BIT(USB_DEVICE_SELF_POWERED);
2257 
2258 		if (udc->is_remote_wakeup)
2259 			status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
2260 
2261 		break;
2262 
2263 	case USB_DIR_IN | USB_RECIP_ENDPOINT | USB_TYPE_STANDARD:
2264 		if ((wValue != 0) || (wLength != 2))
2265 			goto delegate;
2266 
2267 		ep = usbf_get_ep_by_addr(udc, wIndex);
2268 		if (!ep)
2269 			return -EINVAL;
2270 
2271 		status_data = 0;
2272 		if (usbf_ep_is_stalled(ep))
2273 			status_data |= cpu_to_le16(1);
2274 		break;
2275 
2276 	case USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_STANDARD:
2277 		if ((wValue != 0) || (wLength != 2))
2278 			goto delegate;
2279 		status_data = 0;
2280 		break;
2281 
2282 	default:
2283 		goto delegate;
2284 	}
2285 
2286 	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, &status_data,
2287 			  sizeof(status_data), NULL);
2288 	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
2289 
2290 	return 0;
2291 
2292 delegate:
2293 	return usbf_req_delegate(udc, ctrlrequest);
2294 }
2295 
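/* Handle the standard CLEAR_FEATURE/SET_FEATURE requests for the remote
 * wakeup and endpoint halt features. Halting ep0 is refused, and
 * CLEAR_FEATURE(ENDPOINT_HALT) is ignored on a wedged endpoint. Everything
 * else is delegated to the gadget driver.
 */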
2296 static int usbf_req_clear_set_feature(struct usbf_udc *udc,
2297 				      const struct usb_ctrlrequest *ctrlrequest,
2298 				      bool is_set)
2299 {
2300 	struct usbf_ep *ep;
2301 	u16 wLength;
2302 	u16 wValue;
2303 	u16 wIndex;
2304 
2305 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2306 	wLength = le16_to_cpu(ctrlrequest->wLength);
2307 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2308 
2309 	switch (ctrlrequest->bRequestType) {
2310 	case USB_DIR_OUT | USB_RECIP_DEVICE:
2311 		if ((wIndex != 0) || (wLength != 0))
2312 			goto delegate;
2313 
		if (wValue != USB_DEVICE_REMOTE_WAKEUP)
2315 			goto delegate;
2316 
2317 		udc->is_remote_wakeup = is_set;
2318 		break;
2319 
2320 	case USB_DIR_OUT | USB_RECIP_ENDPOINT:
2321 		if (wLength != 0)
2322 			goto delegate;
2323 
2324 		ep = usbf_get_ep_by_addr(udc, wIndex);
2325 		if (!ep)
2326 			return -EINVAL;
2327 
2328 		if ((ep->id == 0) && is_set) {
			/* Endpoint 0 cannot be halted (stalled).
			 * Returning an error code leads to a STALL on ep0 but
			 * keeps the state machine in a consistent state.
			 */
2333 			return -EINVAL;
2334 		}
2335 		if (ep->is_wedged && !is_set) {
2336 			/* Ignore CLEAR_FEATURE(HALT ENDPOINT) when the
2337 			 * endpoint is wedged
2338 			 */
2339 			break;
2340 		}
2341 		usbf_ep_stall(ep, is_set);
2342 		break;
2343 
2344 	default:
2345 		goto delegate;
2346 	}
2347 
2348 	return 0;
2349 
2350 delegate:
2351 	return usbf_req_delegate(udc, ctrlrequest);
2352 }
2353 
2354 static void usbf_ep0_req_set_address_complete(struct usb_ep *_ep,
2355 					      struct usb_request *_req)
2356 {
2357 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2358 
2359 	/* The status phase of the SET_ADDRESS request is completed ... */
2360 	if (_req->status == 0) {
		/* ... without any errors -> Signal the state to the core. */
2362 		usb_gadget_set_state(&ep->udc->gadget, USB_STATE_ADDRESS);
2363 	}
2364 
2365 	/* In case of request failure, there is no need to revert the address
2366 	 * value set to the hardware as the hardware will take care of the
2367 	 * value only if the status stage is completed normally.
2368 	 */
2369 }
2370 
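/* Handle the standard SET_ADDRESS request. The address is written to the
 * hardware immediately (it is only applied once the status stage completes
 * normally) and a zero-length IN status reply is queued; its completion
 * moves the gadget to USB_STATE_ADDRESS.
 */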
2371 static int usbf_req_set_address(struct usbf_udc *udc,
2372 				const struct usb_ctrlrequest *ctrlrequest)
2373 {
2374 	u16 wLength;
2375 	u16 wValue;
2376 	u16 wIndex;
2377 	u32 addr;
2378 
2379 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2380 	wLength = le16_to_cpu(ctrlrequest->wLength);
2381 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2382 
2383 	if (ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
2384 		goto delegate;
2385 
2386 	if ((wIndex != 0) || (wLength != 0) || (wValue > 127))
2387 		return -EINVAL;
2388 
2389 	addr = wValue;
2390 	/* The hardware will take care of this USB address after the status
2391 	 * stage of the SET_ADDRESS request is completed normally.
2392 	 * It is safe to write it now
2393 	 */
2394 	usbf_reg_writel(udc, USBF_REG_USB_ADDRESS, USBF_USB_SET_USB_ADDR(addr));
2395 
	/* Queue the status request */
2397 	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, NULL, 0,
2398 			  usbf_ep0_req_set_address_complete);
2399 	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
2400 
2401 	return 0;
2402 
2403 delegate:
2404 	return usbf_req_delegate(udc, ctrlrequest);
2405 }
2406 
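/* Handle the standard SET_CONFIGURATION request. The request is first
 * delegated to the gadget driver; on success the CONF bit of the USB_CONTROL
 * register is set or cleared to mirror the configured/addressed state.
 */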
2407 static int usbf_req_set_configuration(struct usbf_udc *udc,
2408 				      const struct usb_ctrlrequest *ctrlrequest)
2409 {
2410 	u16 wLength;
2411 	u16 wValue;
2412 	u16 wIndex;
2413 	int ret;
2414 
2415 	ret = usbf_req_delegate(udc, ctrlrequest);
2416 	if (ret)
2417 		return ret;
2418 
2419 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2420 	wLength = le16_to_cpu(ctrlrequest->wLength);
2421 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2422 
2423 	if ((ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) ||
2424 	    (wIndex != 0) || (wLength != 0)) {
		/* No error detected by driver->setup() but it is not a USB 2.0
		 * Ch9 SET_CONFIGURATION.
		 * Nothing more to do
		 */
2429 		return 0;
2430 	}
2431 
2432 	if (wValue & 0x00FF) {
2433 		usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
2434 	} else {
2435 		usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
2436 		/* Go back to Address State */
2437 		spin_unlock(&udc->lock);
2438 		usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
2439 		spin_lock(&udc->lock);
2440 	}
2441 
2442 	return 0;
2443 }
2444 
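/* Handle an ep0 SETUP event: read the 8-byte setup packet from the
 * SETUP_DATA0/1 registers, program the ep0 NAK bits and state machine
 * according to the request direction and length, then process standard
 * chapter 9 requests locally and delegate everything else to the gadget
 * driver.
 */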
2445 static int usbf_handle_ep0_setup(struct usbf_ep *ep0)
2446 {
2447 	union {
2448 		struct usb_ctrlrequest ctrlreq;
2449 		u32 raw[2];
2450 	} crq;
2451 	struct usbf_udc *udc = ep0->udc;
2452 	int ret;
2453 
	/* Read the setup data (i.e. the USB control request) */
2455 	crq.raw[0] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA0);
2456 	crq.raw[1] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA1);
2457 
2458 	dev_dbg(ep0->udc->dev,
2459 		"ep0 req%02x.%02x, wValue 0x%04x, wIndex 0x%04x, wLength 0x%04x\n",
2460 		crq.ctrlreq.bRequestType, crq.ctrlreq.bRequest,
2461 		crq.ctrlreq.wValue, crq.ctrlreq.wIndex, crq.ctrlreq.wLength);
2462 
2463 	/* Set current EP0 state according to the received request */
2464 	if (crq.ctrlreq.wLength) {
2465 		if (crq.ctrlreq.bRequestType & USB_DIR_IN) {
2466 			udc->ep0state = EP0_IN_DATA_PHASE;
2467 			usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2468 				USBF_EP0_INAK,
2469 				USBF_EP0_INAK_EN);
2470 			ep0->is_in = 1;
2471 		} else {
2472 			udc->ep0state = EP0_OUT_DATA_PHASE;
2473 			usbf_ep_reg_bitclr(ep0, USBF_REG_EP0_CONTROL,
2474 				USBF_EP0_ONAK);
2475 			ep0->is_in = 0;
2476 		}
2477 	} else {
2478 		udc->ep0state = EP0_IN_STATUS_START_PHASE;
2479 		ep0->is_in = 1;
2480 	}
2481 
	/* We start a new control transfer -> Clear the delayed status flag */
2483 	ep0->delayed_status = 0;
2484 
2485 	if ((crq.ctrlreq.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
		/* This is not a USB standard request -> delegate */
2487 		goto delegate;
2488 	}
2489 
2490 	switch (crq.ctrlreq.bRequest) {
2491 	case USB_REQ_GET_STATUS:
2492 		ret = usbf_req_get_status(udc, &crq.ctrlreq);
2493 		break;
2494 
2495 	case USB_REQ_CLEAR_FEATURE:
2496 		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, false);
2497 		break;
2498 
2499 	case USB_REQ_SET_FEATURE:
2500 		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, true);
2501 		break;
2502 
2503 	case USB_REQ_SET_ADDRESS:
2504 		ret = usbf_req_set_address(udc, &crq.ctrlreq);
2505 		break;
2506 
2507 	case USB_REQ_SET_CONFIGURATION:
2508 		ret = usbf_req_set_configuration(udc, &crq.ctrlreq);
2509 		break;
2510 
2511 	default:
2512 		goto delegate;
2513 	}
2514 
2515 	return ret;
2516 
2517 delegate:
2518 	return usbf_req_delegate(udc, &crq.ctrlreq);
2519 }
2520 
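/* Common handler for the ep0 data and status phases: process the ep0 queue
 * and, once all queued requests are done, move the ep0 state machine to
 * next_ep0state. -EINPROGRESS from the queue processing is not an error and
 * simply waits for the next interrupt.
 */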
2521 static int usbf_handle_ep0_data_status(struct usbf_ep *ep0,
2522 				  const char *ep0state_name,
2523 				  enum usbf_ep0state next_ep0state)
2524 {
2525 	struct usbf_udc *udc = ep0->udc;
2526 	int ret;
2527 
2528 	ret = usbf_ep_process_queue(ep0);
2529 	switch (ret) {
2530 	case -ENOENT:
2531 		dev_err(udc->dev,
2532 			"no request available for ep0 %s phase\n",
2533 			ep0state_name);
2534 		break;
2535 	case -EINPROGRESS:
2536 		/* More data needs to be processed */
2537 		ret = 0;
2538 		break;
2539 	case 0:
2540 		/* All requests in the queue are processed */
2541 		udc->ep0state = next_ep0state;
2542 		break;
2543 	default:
2544 		dev_err(udc->dev,
2545 			"process queue failed for ep0 %s phase (%d)\n",
2546 			ep0state_name, ret);
2547 		break;
2548 	}
2549 	return ret;
2550 }
2551 
2552 static int usbf_handle_ep0_out_status_start(struct usbf_ep *ep0)
2553 {
2554 	struct usbf_udc *udc = ep0->udc;
2555 	struct usbf_req *req;
2556 
2557 	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2558 				USBF_EP0_ONAK,
2559 				USBF_EP0_PIDCLR);
2560 	ep0->is_in = 0;
2561 
2562 	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2563 	if (!req) {
2564 		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
2565 		usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
2566 	} else {
2567 		if (req->req.length) {
2568 			dev_err(udc->dev,
2569 				"queued request length %u for ep0 out status phase\n",
2570 				req->req.length);
2571 		}
2572 	}
2573 	udc->ep0state = EP0_OUT_STATUS_PHASE;
2574 	return 0;
2575 }
2576 
2577 static int usbf_handle_ep0_in_status_start(struct usbf_ep *ep0)
2578 {
2579 	struct usbf_udc *udc = ep0->udc;
2580 	struct usbf_req *req;
2581 	int ret;
2582 
2583 	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2584 				USBF_EP0_INAK,
2585 				USBF_EP0_INAK_EN | USBF_EP0_PIDCLR);
2586 	ep0->is_in = 1;
2587 
2588 	/* Queue request for status if needed */
2589 	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2590 	if (!req) {
2591 		if (ep0->delayed_status) {
2592 			dev_dbg(ep0->udc->dev,
2593 				"EP0_IN_STATUS_START_PHASE ep0->delayed_status set\n");
2594 			udc->ep0state = EP0_IN_STATUS_PHASE;
2595 			return 0;
2596 		}
2597 
		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
		usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
2602 
2603 		req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2604 	} else {
2605 		if (req->req.length) {
2606 			dev_err(udc->dev,
2607 				"queued request length %u for ep0 in status phase\n",
2608 				req->req.length);
2609 		}
2610 	}
2611 
2612 	ret = usbf_ep0_pio_in(ep0, req);
2613 	if (ret != -EINPROGRESS) {
2614 		usbf_ep_req_done(ep0, req, ret);
2615 		udc->ep0state = EP0_IN_STATUS_END_PHASE;
2616 		return 0;
2617 	}
2618 
2619 	udc->ep0state = EP0_IN_STATUS_PHASE;
2620 	return 0;
2621 }
2622 
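/* ep0 interrupt handler: acknowledge the ep0 status bits and run the control
 * transfer state machine, looping while either the state or the pending
 * status bits keep changing. Any failure stalls ep0, drops pending requests
 * and returns the state machine to EP0_IDLE.
 */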
2623 static void usbf_ep0_interrupt(struct usbf_ep *ep0)
2624 {
2625 	struct usbf_udc *udc = ep0->udc;
2626 	u32 sts, prev_sts;
2627 	int prev_ep0state;
2628 	int ret;
2629 
2630 	ep0->status = usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS);
2631 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_STATUS, ~ep0->status);
2632 
	dev_dbg(ep0->udc->dev, "ep0 status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
2634 		ep0->status,
2635 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_INT_ENA),
2636 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL));
2637 
2638 	sts = ep0->status & (USBF_EP0_SETUP_INT | USBF_EP0_IN_INT | USBF_EP0_OUT_INT |
2639 			     USBF_EP0_OUT_NULL_INT | USBF_EP0_STG_START_INT |
2640 			     USBF_EP0_STG_END_INT);
2641 
2642 	ret = 0;
2643 	do {
2644 		dev_dbg(ep0->udc->dev, "udc->ep0state=%d\n", udc->ep0state);
2645 
2646 		prev_sts = sts;
2647 		prev_ep0state = udc->ep0state;
2648 		switch (udc->ep0state) {
2649 		case EP0_IDLE:
2650 			if (!(sts & USBF_EP0_SETUP_INT))
2651 				break;
2652 
2653 			sts &= ~USBF_EP0_SETUP_INT;
2654 			dev_dbg(ep0->udc->dev, "ep0 handle setup\n");
2655 			ret = usbf_handle_ep0_setup(ep0);
2656 			break;
2657 
2658 		case EP0_IN_DATA_PHASE:
2659 			if (!(sts & USBF_EP0_IN_INT))
2660 				break;
2661 
2662 			sts &= ~USBF_EP0_IN_INT;
2663 			dev_dbg(ep0->udc->dev, "ep0 handle in data phase\n");
2664 			ret = usbf_handle_ep0_data_status(ep0,
2665 				"in data", EP0_OUT_STATUS_START_PHASE);
2666 			break;
2667 
2668 		case EP0_OUT_STATUS_START_PHASE:
2669 			if (!(sts & USBF_EP0_STG_START_INT))
2670 				break;
2671 
2672 			sts &= ~USBF_EP0_STG_START_INT;
2673 			dev_dbg(ep0->udc->dev, "ep0 handle out status start phase\n");
2674 			ret = usbf_handle_ep0_out_status_start(ep0);
2675 			break;
2676 
2677 		case EP0_OUT_STATUS_PHASE:
2678 			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
2679 				break;
2680 
2681 			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
2682 			dev_dbg(ep0->udc->dev, "ep0 handle out status phase\n");
2683 			ret = usbf_handle_ep0_data_status(ep0,
2684 				"out status",
2685 				EP0_OUT_STATUS_END_PHASE);
2686 			break;
2687 
2688 		case EP0_OUT_STATUS_END_PHASE:
2689 			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
2690 				break;
2691 
2692 			sts &= ~USBF_EP0_STG_END_INT;
2693 			dev_dbg(ep0->udc->dev, "ep0 handle out status end phase\n");
2694 			udc->ep0state = EP0_IDLE;
2695 			break;
2696 
2697 		case EP0_OUT_DATA_PHASE:
2698 			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
2699 				break;
2700 
2701 			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
2702 			dev_dbg(ep0->udc->dev, "ep0 handle out data phase\n");
2703 			ret = usbf_handle_ep0_data_status(ep0,
2704 				"out data", EP0_IN_STATUS_START_PHASE);
2705 			break;
2706 
2707 		case EP0_IN_STATUS_START_PHASE:
2708 			if (!(sts & USBF_EP0_STG_START_INT))
2709 				break;
2710 
2711 			sts &= ~USBF_EP0_STG_START_INT;
2712 			dev_dbg(ep0->udc->dev, "ep0 handle in status start phase\n");
2713 			ret = usbf_handle_ep0_in_status_start(ep0);
2714 			break;
2715 
2716 		case EP0_IN_STATUS_PHASE:
2717 			if (!(sts & USBF_EP0_IN_INT))
2718 				break;
2719 
2720 			sts &= ~USBF_EP0_IN_INT;
2721 			dev_dbg(ep0->udc->dev, "ep0 handle in status phase\n");
2722 			ret = usbf_handle_ep0_data_status(ep0,
2723 				"in status", EP0_IN_STATUS_END_PHASE);
2724 			break;
2725 
2726 		case EP0_IN_STATUS_END_PHASE:
2727 			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
2728 				break;
2729 
2730 			sts &= ~USBF_EP0_STG_END_INT;
2731 			dev_dbg(ep0->udc->dev, "ep0 handle in status end\n");
2732 			udc->ep0state = EP0_IDLE;
2733 			break;
2734 
2735 		default:
2736 			udc->ep0state = EP0_IDLE;
2737 			break;
2738 		}
2739 
2740 		if (ret) {
2741 			dev_dbg(ep0->udc->dev, "ep0 failed (%d)\n", ret);
2742 			/* Failure -> stall.
2743 			 * This stall state will be automatically cleared when
2744 			 * the IP receives the next SETUP packet
2745 			 */
2746 			usbf_ep_stall(ep0, true);
2747 
2748 			/* Remove anything that was pending */
2749 			usbf_ep_nuke(ep0, -EPROTO);
2750 
2751 			udc->ep0state = EP0_IDLE;
2752 			break;
2753 		}
2754 
2755 	} while ((prev_ep0state != udc->ep0state) || (prev_sts != sts));
2756 
2757 	dev_dbg(ep0->udc->dev, "ep0 done udc->ep0state=%d, status=0x%08x. next=0x%08x\n",
2758 		udc->ep0state, sts,
2759 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS));
2760 }
2761 
2762 static void usbf_epn_process_queue(struct usbf_ep *epn)
2763 {
2764 	int ret;
2765 
2766 	ret = usbf_ep_process_queue(epn);
2767 	switch (ret) {
2768 	case -ENOENT:
2769 		dev_warn(epn->udc->dev, "ep%u %s, no request available\n",
2770 			epn->id, epn->is_in ? "in" : "out");
2771 		break;
2772 	case -EINPROGRESS:
2773 		/* More data needs to be processed */
2774 		ret = 0;
2775 		break;
2776 	case 0:
2777 		/* All requests in the queue are processed */
2778 		break;
2779 	default:
2780 		dev_err(epn->udc->dev, "ep%u %s, process queue failed (%d)\n",
2781 			epn->id, epn->is_in ? "in" : "out", ret);
2782 		break;
2783 	}
2784 
2785 	if (ret) {
2786 		dev_dbg(epn->udc->dev, "ep%u %s failed (%d)\n", epn->id,
2787 			epn->is_in ? "in" : "out", ret);
2788 		usbf_ep_stall(epn, true);
2789 		usbf_ep_nuke(epn, ret);
2790 	}
2791 }
2792 
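/* Interrupt handler for the endpoints other than ep0: acknowledge the enabled
 * status bits and process the request queue on IN and/or OUT completion
 * events. Interrupts received while the endpoint is disabled only trigger a
 * warning.
 */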
2793 static void usbf_epn_interrupt(struct usbf_ep *epn)
2794 {
2795 	u32 sts;
2796 	u32 ena;
2797 
2798 	epn->status = usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS);
2799 	ena = usbf_ep_reg_readl(epn, USBF_REG_EPN_INT_ENA);
2800 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(epn->status & ena));
2801 
	dev_dbg(epn->udc->dev, "ep%u %s status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
2803 		epn->id, epn->is_in ? "in" : "out", epn->status, ena,
2804 		usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL));
2805 
2806 	if (epn->disabled) {
2807 		dev_warn(epn->udc->dev, "ep%u %s, interrupt while disabled\n",
2808 			epn->id, epn->is_in ? "in" : "out");
2809 		return;
2810 	}
2811 
2812 	sts = epn->status & ena;
2813 
2814 	if (sts & (USBF_EPN_IN_END_INT | USBF_EPN_IN_INT)) {
2815 		sts &= ~(USBF_EPN_IN_END_INT | USBF_EPN_IN_INT);
2816 		dev_dbg(epn->udc->dev, "ep%u %s process queue (in interrupts)\n",
2817 			epn->id, epn->is_in ? "in" : "out");
2818 		usbf_epn_process_queue(epn);
2819 	}
2820 
2821 	if (sts & (USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT)) {
2822 		sts &= ~(USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
2823 		dev_dbg(epn->udc->dev, "ep%u %s process queue (out interrupts)\n",
2824 			epn->id, epn->is_in ? "in" : "out");
2825 		usbf_epn_process_queue(epn);
2826 	}
2827 
2828 	dev_dbg(epn->udc->dev, "ep%u %s done status=0x%08x. next=0x%08x\n",
2829 		epn->id, epn->is_in ? "in" : "out",
2830 		sts, usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS));
2831 }
2832 
2833 static void usbf_ep_reset(struct usbf_ep *ep)
2834 {
2835 	ep->status = 0;
2836 	/* Remove anything that was pending */
2837 	usbf_ep_nuke(ep, -ESHUTDOWN);
2838 }
2839 
2840 static void usbf_reset(struct usbf_udc *udc)
2841 {
2842 	int i;
2843 
2844 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
2845 		if (udc->ep[i].disabled)
2846 			continue;
2847 
2848 		usbf_ep_reset(&udc->ep[i]);
2849 	}
2850 
2851 	if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
2852 		udc->gadget.speed = USB_SPEED_HIGH;
2853 	else
2854 		udc->gadget.speed = USB_SPEED_FULL;
2855 
2856 	/* Remote wakeup feature must be disabled on USB bus reset */
2857 	udc->is_remote_wakeup = false;
2858 
2859 	/* Enable endpoint zero */
2860 	usbf_ep0_enable(&udc->ep[0]);
2861 
2862 	if (udc->driver) {
2863 		/* Signal the reset */
2864 		spin_unlock(&udc->lock);
2865 		usb_gadget_udc_reset(&udc->gadget, udc->driver);
2866 		spin_lock(&udc->lock);
2867 	}
2868 }
2869 
2870 static void usbf_driver_suspend(struct usbf_udc *udc)
2871 {
2872 	if (udc->is_usb_suspended) {
2873 		dev_dbg(udc->dev, "already suspended\n");
2874 		return;
2875 	}
2876 
2877 	dev_dbg(udc->dev, "do usb suspend\n");
2878 	udc->is_usb_suspended = true;
2879 
2880 	if (udc->driver && udc->driver->suspend) {
2881 		spin_unlock(&udc->lock);
2882 		udc->driver->suspend(&udc->gadget);
2883 		spin_lock(&udc->lock);
2884 
		/* The datasheet says to set the USB_CONTROL register SUSPEND
		 * bit when a USB bus suspend is detected.
		 * This bit stops the clocks (clocks for EPC, SIE, USBPHY) but
		 * these clocks do not seem to be used only by the USB device.
		 * Some UARTs can be lost ...
		 * So, do not set the USB_CONTROL register SUSPEND bit.
		 */
2892 	}
2893 }
2894 
2895 static void usbf_driver_resume(struct usbf_udc *udc)
2896 {
2897 	if (!udc->is_usb_suspended)
2898 		return;
2899 
2900 	dev_dbg(udc->dev, "do usb resume\n");
2901 	udc->is_usb_suspended = false;
2902 
2903 	if (udc->driver && udc->driver->resume) {
2904 		spin_unlock(&udc->lock);
2905 		udc->driver->resume(&udc->gadget);
2906 		spin_lock(&udc->lock);
2907 	}
2908 }
2909 
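/* Main EPC interrupt handler: resume, bus reset, speed-mode change, suspend
 * and per-endpoint events. Bus reset and endpoint activity first resume the
 * gadget driver if a suspend was previously signaled.
 */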
2910 static irqreturn_t usbf_epc_irq(int irq, void *_udc)
2911 {
2912 	struct usbf_udc *udc = (struct usbf_udc *)_udc;
2913 	unsigned long flags;
2914 	struct usbf_ep *ep;
2915 	u32 int_sts;
2916 	u32 int_en;
2917 	int i;
2918 
2919 	spin_lock_irqsave(&udc->lock, flags);
2920 
2921 	int_en = usbf_reg_readl(udc, USBF_REG_USB_INT_ENA);
2922 	int_sts = usbf_reg_readl(udc, USBF_REG_USB_INT_STA) & int_en;
2923 	usbf_reg_writel(udc, USBF_REG_USB_INT_STA, ~int_sts);
2924 
2925 	dev_dbg(udc->dev, "int_sts=0x%08x\n", int_sts);
2926 
2927 	if (int_sts & USBF_USB_RSUM_INT) {
2928 		dev_dbg(udc->dev, "handle resume\n");
2929 		usbf_driver_resume(udc);
2930 	}
2931 
2932 	if (int_sts & USBF_USB_USB_RST_INT) {
2933 		dev_dbg(udc->dev, "handle bus reset\n");
2934 		usbf_driver_resume(udc);
2935 		usbf_reset(udc);
2936 	}
2937 
2938 	if (int_sts & USBF_USB_SPEED_MODE_INT) {
2939 		if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
2940 			udc->gadget.speed = USB_SPEED_HIGH;
2941 		else
2942 			udc->gadget.speed = USB_SPEED_FULL;
2943 		dev_dbg(udc->dev, "handle speed change (%s)\n",
2944 			udc->gadget.speed == USB_SPEED_HIGH ? "High" : "Full");
2945 	}
2946 
2947 	if (int_sts & USBF_USB_EPN_INT(0)) {
2948 		usbf_driver_resume(udc);
2949 		usbf_ep0_interrupt(&udc->ep[0]);
2950 	}
2951 
2952 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
2953 		ep = &udc->ep[i];
2954 
2955 		if (int_sts & USBF_USB_EPN_INT(i)) {
2956 			usbf_driver_resume(udc);
2957 			usbf_epn_interrupt(ep);
2958 		}
2959 	}
2960 
2961 	if (int_sts & USBF_USB_SPND_INT) {
2962 		dev_dbg(udc->dev, "handle suspend\n");
2963 		usbf_driver_suspend(udc);
2964 	}
2965 
2966 	spin_unlock_irqrestore(&udc->lock, flags);
2967 
2968 	return IRQ_HANDLED;
2969 }
2970 
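/* AHB bridge interrupt handler: VBUS level changes are reported to the UDC
 * core (with the lock released) and per-endpoint DMA completion events are
 * dispatched through the endpoint's bridge_on_dma_end callback when one is
 * pending.
 */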
2971 static irqreturn_t usbf_ahb_epc_irq(int irq, void *_udc)
2972 {
2973 	struct usbf_udc *udc = (struct usbf_udc *)_udc;
2974 	unsigned long flags;
2975 	struct usbf_ep *epn;
2976 	u32 sysbint;
2977 	void (*ep_action)(struct usbf_ep *epn);
2978 	int i;
2979 
2980 	spin_lock_irqsave(&udc->lock, flags);
2981 
2982 	/* Read and ack interrupts */
2983 	sysbint = usbf_reg_readl(udc, USBF_REG_AHBBINT);
2984 	usbf_reg_writel(udc, USBF_REG_AHBBINT, sysbint);
2985 
2986 	if ((sysbint & USBF_SYS_VBUS_INT) == USBF_SYS_VBUS_INT) {
2987 		if (usbf_reg_readl(udc, USBF_REG_EPCTR) & USBF_SYS_VBUS_LEVEL) {
2988 			dev_dbg(udc->dev, "handle vbus (1)\n");
2989 			spin_unlock(&udc->lock);
2990 			usb_udc_vbus_handler(&udc->gadget, true);
2991 			usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
2992 			spin_lock(&udc->lock);
2993 		} else {
2994 			dev_dbg(udc->dev, "handle vbus (0)\n");
2995 			udc->is_usb_suspended = false;
2996 			spin_unlock(&udc->lock);
2997 			usb_udc_vbus_handler(&udc->gadget, false);
2998 			usb_gadget_set_state(&udc->gadget,
2999 					     USB_STATE_NOTATTACHED);
3000 			spin_lock(&udc->lock);
3001 		}
3002 	}
3003 
3004 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
3005 		if (sysbint & USBF_SYS_DMA_ENDINT_EPN(i)) {
3006 			epn = &udc->ep[i];
3007 			dev_dbg(epn->udc->dev,
3008 				"ep%u handle DMA complete. action=%ps\n",
3009 				epn->id, epn->bridge_on_dma_end);
3010 			ep_action = epn->bridge_on_dma_end;
3011 			if (ep_action) {
3012 				epn->bridge_on_dma_end = NULL;
3013 				ep_action(epn);
3014 			}
3015 		}
3016 	}
3017 
3018 	spin_unlock_irqrestore(&udc->lock, flags);
3019 
3020 	return IRQ_HANDLED;
3021 }
3022 
3023 static int usbf_udc_start(struct usb_gadget *gadget,
3024 			  struct usb_gadget_driver *driver)
3025 {
3026 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3027 	unsigned long flags;
3028 
3029 	dev_info(udc->dev, "start (driver '%s')\n", driver->driver.name);
3030 
3031 	spin_lock_irqsave(&udc->lock, flags);
3032 
3033 	/* hook up the driver */
3034 	udc->driver = driver;
3035 
3036 	/* Enable VBUS interrupt */
3037 	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, USBF_SYS_VBUS_INTEN);
3038 
3039 	spin_unlock_irqrestore(&udc->lock, flags);
3040 
3041 	return 0;
3042 }
3043 
3044 static int usbf_udc_stop(struct usb_gadget *gadget)
3045 {
3046 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3047 	unsigned long flags;
3048 
3049 	spin_lock_irqsave(&udc->lock, flags);
3050 
3051 	/* Disable VBUS interrupt */
3052 	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, 0);
3053 
3054 	udc->driver = NULL;
3055 
3056 	spin_unlock_irqrestore(&udc->lock, flags);
3057 
3058 	dev_info(udc->dev, "stopped\n");
3059 
3060 	return 0;
3061 }
3062 
3063 static int usbf_get_frame(struct usb_gadget *gadget)
3064 {
3065 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3066 
3067 	return USBF_USB_GET_FRAME(usbf_reg_readl(udc, USBF_REG_USB_ADDRESS));
3068 }
3069 
3070 static void usbf_attach(struct usbf_udc *udc)
3071 {
	/* Enable the USB signal to the Function PHY
	 * D+ signal pull-up
	 * Disable endpoint 0, it will be automatically enabled when a USB
	 * reset is received.
	 * Disable the other endpoints
	 */
3078 	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
3079 		USBF_USB_CONNECTB | USBF_USB_DEFAULT | USBF_USB_CONF,
3080 		USBF_USB_PUE2);
3081 
	/* Enable reset, speed-mode change, resume and suspend interrupts */
3083 	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA,
3084 		USBF_USB_USB_RST_EN | USBF_USB_SPEED_MODE_EN | USBF_USB_RSUM_EN | USBF_USB_SPND_EN);
3085 }
3086 
3087 static void usbf_detach(struct usbf_udc *udc)
3088 {
3089 	int i;
3090 
3091 	/* Disable interrupts */
3092 	usbf_reg_writel(udc, USBF_REG_USB_INT_ENA, 0);
3093 
3094 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
3095 		if (udc->ep[i].disabled)
3096 			continue;
3097 
3098 		usbf_ep_reset(&udc->ep[i]);
3099 	}
3100 
	/* Disable the USB signal to the Function PHY
	 * Do not pull up the D+ signal
	 * Disable endpoint 0
	 * Disable the other endpoints
	 */
3106 	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
3107 		USBF_USB_PUE2 | USBF_USB_DEFAULT | USBF_USB_CONF,
3108 		USBF_USB_CONNECTB);
3109 }
3110 
3111 static int usbf_pullup(struct usb_gadget *gadget, int is_on)
3112 {
3113 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3114 	unsigned long flags;
3115 
3116 	dev_dbg(udc->dev, "pullup %d\n", is_on);
3117 
3118 	spin_lock_irqsave(&udc->lock, flags);
3119 	if (is_on)
3120 		usbf_attach(udc);
3121 	else
3122 		usbf_detach(udc);
3123 	spin_unlock_irqrestore(&udc->lock, flags);
3124 
3125 	return 0;
3126 }
3127 
3128 static int usbf_udc_set_selfpowered(struct usb_gadget *gadget,
3129 				    int is_selfpowered)
3130 {
3131 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3132 	unsigned long flags;
3133 
3134 	spin_lock_irqsave(&udc->lock, flags);
3135 	gadget->is_selfpowered = (is_selfpowered != 0);
3136 	spin_unlock_irqrestore(&udc->lock, flags);
3137 
3138 	return 0;
3139 }
3140 
3141 static int usbf_udc_wakeup(struct usb_gadget *gadget)
3142 {
3143 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3144 	unsigned long flags;
3145 	int ret;
3146 
3147 	spin_lock_irqsave(&udc->lock, flags);
3148 
3149 	if (!udc->is_remote_wakeup) {
3150 		dev_dbg(udc->dev, "remote wakeup not allowed\n");
3151 		ret = -EINVAL;
3152 		goto end;
3153 	}
3154 
3155 	dev_dbg(udc->dev, "do wakeup\n");
3156 
3157 	/* Send the resume signal */
3158 	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
3159 	usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
3160 
3161 	ret = 0;
3162 end:
3163 	spin_unlock_irqrestore(&udc->lock, flags);
3164 	return ret;
3165 }
3166 
static const struct usb_gadget_ops usbf_gadget_ops = {
3168 	.get_frame = usbf_get_frame,
3169 	.pullup = usbf_pullup,
3170 	.udc_start = usbf_udc_start,
3171 	.udc_stop = usbf_udc_stop,
3172 	.set_selfpowered = usbf_udc_set_selfpowered,
3173 	.wakeup = usbf_udc_wakeup,
3174 };
3175 
3176 static int usbf_epn_check(struct usbf_ep *epn)
3177 {
3178 	const char *type_txt;
3179 	const char *buf_txt;
3180 	int ret = 0;
3181 	u32 ctrl;
3182 
3183 	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
3184 
3185 	switch (ctrl & USBF_EPN_MODE_MASK) {
3186 	case USBF_EPN_MODE_BULK:
3187 		type_txt = "bulk";
3188 		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
3189 		    !epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
3190 			dev_err(epn->udc->dev,
3191 				"ep%u caps mismatch, bulk expected\n", epn->id);
3192 			ret = -EINVAL;
3193 		}
3194 		break;
3195 	case USBF_EPN_MODE_INTR:
3196 		type_txt = "intr";
3197 		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
3198 		    epn->ep.caps.type_bulk || !epn->ep.caps.type_int) {
3199 			dev_err(epn->udc->dev,
3200 				"ep%u caps mismatch, int expected\n", epn->id);
3201 			ret = -EINVAL;
3202 		}
3203 		break;
3204 	case USBF_EPN_MODE_ISO:
3205 		type_txt = "iso";
3206 		if (epn->ep.caps.type_control || !epn->ep.caps.type_iso ||
3207 		    epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
3208 			dev_err(epn->udc->dev,
3209 				"ep%u caps mismatch, iso expected\n", epn->id);
3210 			ret = -EINVAL;
3211 		}
3212 		break;
3213 	default:
3214 		type_txt = "unknown";
3215 		dev_err(epn->udc->dev, "ep%u unknown type\n", epn->id);
3216 		ret = -EINVAL;
3217 		break;
3218 	}
3219 
3220 	if (ctrl & USBF_EPN_BUF_TYPE_DOUBLE) {
3221 		buf_txt = "double";
3222 		if (!usbf_ep_info[epn->id].is_double) {
3223 			dev_err(epn->udc->dev,
3224 				"ep%u buffer mismatch, double expected\n",
3225 				epn->id);
3226 			ret = -EINVAL;
3227 		}
3228 	} else {
3229 		buf_txt = "single";
3230 		if (usbf_ep_info[epn->id].is_double) {
3231 			dev_err(epn->udc->dev,
3232 				"ep%u buffer mismatch, single expected\n",
3233 				epn->id);
3234 			ret = -EINVAL;
3235 		}
3236 	}
3237 
3238 	dev_dbg(epn->udc->dev, "ep%u (%s) %s, %s buffer %u, checked %s\n",
3239 		 epn->id, epn->ep.name, type_txt, buf_txt,
3240 		 epn->ep.maxpacket_limit, ret ? "failed" : "ok");
3241 
3242 	return ret;
3243 }
3244 
3245 static int usbf_probe(struct platform_device *pdev)
3246 {
3247 	struct device *dev = &pdev->dev;
3248 	struct usbf_udc *udc;
3249 	struct usbf_ep *ep;
3250 	unsigned int i;
3251 	int irq;
3252 	int ret;
3253 
3254 	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
3255 	if (!udc)
3256 		return -ENOMEM;
3257 	platform_set_drvdata(pdev, udc);
3258 
3259 	udc->dev = dev;
3260 	spin_lock_init(&udc->lock);
3261 
3262 	udc->regs = devm_platform_ioremap_resource(pdev, 0);
3263 	if (IS_ERR(udc->regs))
3264 		return PTR_ERR(udc->regs);
3265 
3266 	devm_pm_runtime_enable(&pdev->dev);
3267 	ret = pm_runtime_resume_and_get(&pdev->dev);
3268 	if (ret < 0)
3269 		return ret;
3270 
3271 	dev_info(dev, "USBF version: %08x\n",
3272 		usbf_reg_readl(udc, USBF_REG_USBSSVER));
3273 
3274 	/* Resetting the PLL is handled via the clock driver as it has common
3275 	 * registers with USB Host
3276 	 */
3277 	usbf_reg_bitclr(udc, USBF_REG_EPCTR, USBF_SYS_EPC_RST);
3278 
	/* Default values; the speed is updated on bus reset and speed-mode change */
3280 	udc->gadget.speed = USB_SPEED_FULL;
3281 	udc->gadget.max_speed = USB_SPEED_HIGH;
3282 	udc->gadget.ops = &usbf_gadget_ops;
3283 
3284 	udc->gadget.name = dev->driver->name;
3285 	udc->gadget.dev.parent = dev;
3286 	udc->gadget.ep0 = &udc->ep[0].ep;
3287 
	/* The hardware DMA controller needs DMA addresses aligned on 32 bits.
	 * A fallback to PIO is done if DMA addresses are not aligned.
	 */
3291 	udc->gadget.quirk_avoids_skb_reserve = 1;
3292 
3293 	INIT_LIST_HEAD(&udc->gadget.ep_list);
	/* We have a canned request structure to allow sending packets as
	 * replies to GET_STATUS requests
	 */
3297 	INIT_LIST_HEAD(&udc->setup_reply.queue);
3298 
3299 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
3300 		ep = &udc->ep[i];
3301 
3302 		if (!(usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
3303 		      USBF_SYS_EP_AVAILABLE(i))) {
3304 			continue;
3305 		}
3306 
3307 		INIT_LIST_HEAD(&ep->queue);
3308 
3309 		ep->id = i;
3310 		ep->disabled = 1;
3311 		ep->udc = udc;
3312 		ep->ep.ops = &usbf_ep_ops;
3313 		ep->ep.name = usbf_ep_info[i].name;
3314 		ep->ep.caps = usbf_ep_info[i].caps;
3315 		usb_ep_set_maxpacket_limit(&ep->ep,
3316 					   usbf_ep_info[i].maxpacket_limit);
3317 
3318 		if (ep->id == 0) {
3319 			ep->regs = ep->udc->regs + USBF_BASE_EP0;
3320 		} else {
3321 			ep->regs = ep->udc->regs + USBF_BASE_EPN(ep->id - 1);
3322 			ret = usbf_epn_check(ep);
3323 			if (ret)
3324 				return ret;
3325 			if (usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
3326 			    USBF_SYS_DMA_AVAILABLE(i)) {
3327 				ep->dma_regs = ep->udc->regs +
3328 					       USBF_BASE_DMA_EPN(ep->id - 1);
3329 			}
3330 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
3331 		}
3332 	}
3333 
3334 	irq = platform_get_irq(pdev, 0);
3335 	if (irq < 0)
3336 		return irq;
3337 	ret = devm_request_irq(dev, irq, usbf_epc_irq, 0, "usbf-epc", udc);
3338 	if (ret) {
3339 		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
3340 		return ret;
3341 	}
3342 
3343 	irq = platform_get_irq(pdev, 1);
3344 	if (irq < 0)
3345 		return irq;
3346 	ret = devm_request_irq(dev, irq, usbf_ahb_epc_irq, 0, "usbf-ahb-epc", udc);
3347 	if (ret) {
3348 		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
3349 		return ret;
3350 	}
3351 
3352 	usbf_reg_bitset(udc, USBF_REG_AHBMCTR, USBF_SYS_WBURST_TYPE);
3353 
3354 	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL,
3355 		USBF_USB_INT_SEL | USBF_USB_SOF_RCV | USBF_USB_SOF_CLK_MODE);
3356 
3357 	ret = usb_add_gadget_udc(dev, &udc->gadget);
3358 	if (ret)
3359 		return ret;
3360 
3361 	return 0;
3362 }
3363 
3364 static void usbf_remove(struct platform_device *pdev)
3365 {
3366 	struct usbf_udc *udc = platform_get_drvdata(pdev);
3367 
3368 	usb_del_gadget_udc(&udc->gadget);
3369 
3370 	pm_runtime_put(&pdev->dev);
3371 }
3372 
3373 static const struct of_device_id usbf_match[] = {
3374 	{ .compatible = "renesas,rzn1-usbf" },
3375 	{} /* sentinel */
3376 };
3377 MODULE_DEVICE_TABLE(of, usbf_match);
3378 
3379 static struct platform_driver udc_driver = {
3380 	.driver = {
3381 		.name = "usbf_renesas",
3383 		.of_match_table = usbf_match,
3384 	},
3385 	.probe          = usbf_probe,
3386 	.remove_new     = usbf_remove,
3387 };
3388 
3389 module_platform_driver(udc_driver);
3390 
3391 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
3392 MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 USB Function driver");
3393 MODULE_LICENSE("GPL");
3394