1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Renesas USBF USB Function driver
4  *
5  * Copyright 2022 Schneider Electric
6  * Author: Herve Codina <herve.codina@bootlin.com>
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/iopoll.h>
13 #include <linux/kernel.h>
14 #include <linux/kfifo.h>
15 #include <linux/module.h>
16 #include <linux/of_address.h>
17 #include <linux/of_irq.h>
18 #include <linux/of_platform.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/types.h>
21 #include <linux/usb/composite.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/role.h>
24 
25 #define USBF_NUM_ENDPOINTS	16
26 #define USBF_EP0_MAX_PCKT_SIZE	64
27 
28 /* EPC registers */
29 #define USBF_REG_USB_CONTROL	0x000
30 #define     USBF_USB_PUE2		BIT(2)
31 #define     USBF_USB_CONNECTB		BIT(3)
32 #define     USBF_USB_DEFAULT		BIT(4)
33 #define     USBF_USB_CONF		BIT(5)
34 #define     USBF_USB_SUSPEND		BIT(6)
35 #define     USBF_USB_RSUM_IN		BIT(7)
36 #define     USBF_USB_SOF_RCV		BIT(8)
37 #define     USBF_USB_FORCEFS		BIT(9)
38 #define     USBF_USB_INT_SEL		BIT(10)
39 #define     USBF_USB_SOF_CLK_MODE	BIT(11)
40 
41 #define USBF_REG_USB_STATUS	0x004
42 #define     USBF_USB_RSUM_OUT		BIT(1)
43 #define     USBF_USB_SPND_OUT		BIT(2)
44 #define     USBF_USB_USB_RST		BIT(3)
45 #define     USBF_USB_DEFAULT_ST		BIT(4)
46 #define     USBF_USB_CONF_ST		BIT(5)
47 #define     USBF_USB_SPEED_MODE		BIT(6)
48 #define     USBF_USB_SOF_DELAY_STATUS	BIT(31)
49 
50 #define USBF_REG_USB_ADDRESS	0x008
51 #define     USBF_USB_SOF_STATUS		BIT(15)
52 #define     USBF_USB_SET_USB_ADDR(_a)	((_a) << 16)
53 #define     USBF_USB_GET_FRAME(_r)	((_r) & 0x7FF)
54 
55 #define USBF_REG_SETUP_DATA0	0x018
56 #define USBF_REG_SETUP_DATA1	0x01C
57 #define USBF_REG_USB_INT_STA	0x020
58 #define     USBF_USB_RSUM_INT		BIT(1)
59 #define     USBF_USB_SPND_INT		BIT(2)
60 #define     USBF_USB_USB_RST_INT	BIT(3)
61 #define     USBF_USB_SOF_INT		BIT(4)
62 #define     USBF_USB_SOF_ERROR_INT	BIT(5)
63 #define     USBF_USB_SPEED_MODE_INT	BIT(6)
64 #define     USBF_USB_EPN_INT(_n)	(BIT(8) << (_n)) /* n=0..15 */
65 
66 #define USBF_REG_USB_INT_ENA	0x024
67 #define     USBF_USB_RSUM_EN		BIT(1)
68 #define     USBF_USB_SPND_EN		BIT(2)
69 #define     USBF_USB_USB_RST_EN		BIT(3)
70 #define     USBF_USB_SOF_EN		BIT(4)
71 #define     USBF_USB_SOF_ERROR_EN	BIT(5)
72 #define     USBF_USB_SPEED_MODE_EN	BIT(6)
73 #define     USBF_USB_EPN_EN(_n)		(BIT(8) << (_n)) /* n=0..15 */
74 
75 #define USBF_BASE_EP0		0x028
76 /* EP0 registers offsets from Base + USBF_BASE_EP0 (EP0 regs area) */
77 #define     USBF_REG_EP0_CONTROL	0x00
78 #define         USBF_EP0_ONAK			BIT(0)
79 #define         USBF_EP0_INAK			BIT(1)
80 #define         USBF_EP0_STL			BIT(2)
81 #define         USBF_EP0_PERR_NAK_CLR		BIT(3)
82 #define         USBF_EP0_INAK_EN		BIT(4)
83 #define         USBF_EP0_DW_MASK		(0x3 << 5)
84 #define         USBF_EP0_DW(_s)			((_s) << 5)
85 #define         USBF_EP0_DEND			BIT(7)
86 #define         USBF_EP0_BCLR			BIT(8)
87 #define         USBF_EP0_PIDCLR			BIT(9)
88 #define         USBF_EP0_AUTO			BIT(16)
89 #define         USBF_EP0_OVERSEL		BIT(17)
90 #define         USBF_EP0_STGSEL			BIT(18)
91 
92 #define     USBF_REG_EP0_STATUS		0x04
93 #define         USBF_EP0_SETUP_INT		BIT(0)
94 #define         USBF_EP0_STG_START_INT		BIT(1)
95 #define         USBF_EP0_STG_END_INT		BIT(2)
96 #define         USBF_EP0_STALL_INT		BIT(3)
97 #define         USBF_EP0_IN_INT			BIT(4)
98 #define         USBF_EP0_OUT_INT		BIT(5)
99 #define         USBF_EP0_OUT_OR_INT		BIT(6)
100 #define         USBF_EP0_OUT_NULL_INT		BIT(7)
101 #define         USBF_EP0_IN_EMPTY		BIT(8)
102 #define         USBF_EP0_IN_FULL		BIT(9)
103 #define         USBF_EP0_IN_DATA		BIT(10)
104 #define         USBF_EP0_IN_NAK_INT		BIT(11)
105 #define         USBF_EP0_OUT_EMPTY		BIT(12)
106 #define         USBF_EP0_OUT_FULL		BIT(13)
107 #define         USBF_EP0_OUT_NULL		BIT(14)
108 #define         USBF_EP0_OUT_NAK_INT		BIT(15)
109 #define         USBF_EP0_PERR_NAK_INT		BIT(16)
110 #define         USBF_EP0_PERR_NAK		BIT(17)
111 #define         USBF_EP0_PID			BIT(18)
112 
113 #define     USBF_REG_EP0_INT_ENA	0x08
114 #define         USBF_EP0_SETUP_EN		BIT(0)
115 #define         USBF_EP0_STG_START_EN		BIT(1)
116 #define         USBF_EP0_STG_END_EN		BIT(2)
117 #define         USBF_EP0_STALL_EN		BIT(3)
118 #define         USBF_EP0_IN_EN			BIT(4)
119 #define         USBF_EP0_OUT_EN			BIT(5)
120 #define         USBF_EP0_OUT_OR_EN		BIT(6)
121 #define         USBF_EP0_OUT_NULL_EN		BIT(7)
122 #define         USBF_EP0_IN_NAK_EN		BIT(11)
123 #define         USBF_EP0_OUT_NAK_EN		BIT(15)
124 #define         USBF_EP0_PERR_NAK_EN		BIT(16)
125 
126 #define     USBF_REG_EP0_LENGTH		0x0C
127 #define         USBF_EP0_LDATA			(0x7FF << 0)
128 #define     USBF_REG_EP0_READ		0x10
129 #define     USBF_REG_EP0_WRITE		0x14
130 
131 #define USBF_BASE_EPN(_n)	(0x040 + (_n) * 0x020)
132 /* EPn registers offsets from Base + USBF_BASE_EPN(n-1). n=1..15 */
133 #define     USBF_REG_EPN_CONTROL	0x000
134 #define         USBF_EPN_ONAK			BIT(0)
135 #define         USBF_EPN_OSTL			BIT(2)
136 #define         USBF_EPN_ISTL			BIT(3)
137 #define         USBF_EPN_OSTL_EN		BIT(4)
138 #define         USBF_EPN_DW_MASK		(0x3 << 5)
139 #define         USBF_EPN_DW(_s)			((_s) << 5)
140 #define         USBF_EPN_DEND			BIT(7)
141 #define         USBF_EPN_CBCLR			BIT(8)
142 #define         USBF_EPN_BCLR			BIT(9)
143 #define         USBF_EPN_OPIDCLR		BIT(10)
144 #define         USBF_EPN_IPIDCLR		BIT(11)
145 #define         USBF_EPN_AUTO			BIT(16)
146 #define         USBF_EPN_OVERSEL		BIT(17)
147 #define         USBF_EPN_MODE_MASK		(0x3 << 24)
148 #define         USBF_EPN_MODE_BULK		(0x0 << 24)
149 #define         USBF_EPN_MODE_INTR		(0x1 << 24)
150 #define         USBF_EPN_MODE_ISO		(0x2 << 24)
151 #define         USBF_EPN_DIR0			BIT(26)
152 #define         USBF_EPN_BUF_TYPE_DOUBLE	BIT(30)
153 #define         USBF_EPN_EN			BIT(31)
154 
155 #define     USBF_REG_EPN_STATUS		0x004
156 #define         USBF_EPN_IN_EMPTY		BIT(0)
157 #define         USBF_EPN_IN_FULL		BIT(1)
158 #define         USBF_EPN_IN_DATA		BIT(2)
159 #define         USBF_EPN_IN_INT			BIT(3)
160 #define         USBF_EPN_IN_STALL_INT		BIT(4)
161 #define         USBF_EPN_IN_NAK_ERR_INT		BIT(5)
162 #define         USBF_EPN_IN_END_INT		BIT(7)
163 #define         USBF_EPN_IPID			BIT(10)
164 #define         USBF_EPN_OUT_EMPTY		BIT(16)
165 #define         USBF_EPN_OUT_FULL		BIT(17)
166 #define         USBF_EPN_OUT_NULL_INT		BIT(18)
167 #define         USBF_EPN_OUT_INT		BIT(19)
168 #define         USBF_EPN_OUT_STALL_INT		BIT(20)
169 #define         USBF_EPN_OUT_NAK_ERR_INT	BIT(21)
170 #define         USBF_EPN_OUT_OR_INT		BIT(22)
171 #define         USBF_EPN_OUT_END_INT		BIT(23)
172 #define         USBF_EPN_ISO_CRC		BIT(24)
173 #define         USBF_EPN_ISO_OR			BIT(26)
174 #define         USBF_EPN_OUT_NOTKN		BIT(27)
175 #define         USBF_EPN_ISO_OPID		BIT(28)
176 #define         USBF_EPN_ISO_PIDERR		BIT(29)
177 
178 #define     USBF_REG_EPN_INT_ENA	0x008
179 #define         USBF_EPN_IN_EN			BIT(3)
180 #define         USBF_EPN_IN_STALL_EN		BIT(4)
181 #define         USBF_EPN_IN_NAK_ERR_EN		BIT(5)
182 #define         USBF_EPN_IN_END_EN		BIT(7)
183 #define         USBF_EPN_OUT_NULL_EN		BIT(18)
184 #define         USBF_EPN_OUT_EN			BIT(19)
185 #define         USBF_EPN_OUT_STALL_EN		BIT(20)
186 #define         USBF_EPN_OUT_NAK_ERR_EN		BIT(21)
187 #define         USBF_EPN_OUT_OR_EN		BIT(22)
188 #define         USBF_EPN_OUT_END_EN		BIT(23)
189 
190 #define     USBF_REG_EPN_DMA_CTRL	0x00C
191 #define         USBF_EPN_DMAMODE0		BIT(0)
192 #define         USBF_EPN_DMA_EN			BIT(4)
193 #define         USBF_EPN_STOP_SET		BIT(8)
194 #define         USBF_EPN_BURST_SET		BIT(9)
195 #define         USBF_EPN_DEND_SET		BIT(10)
196 #define         USBF_EPN_STOP_MODE		BIT(11)
197 
198 #define     USBF_REG_EPN_PCKT_ADRS	0x010
199 #define         USBF_EPN_MPKT(_l)		((_l) << 0)
200 #define         USBF_EPN_BASEAD(_a)		((_a) << 16)
201 
202 #define     USBF_REG_EPN_LEN_DCNT	0x014
203 #define         USBF_EPN_GET_LDATA(_r)		((_r) & 0x7FF)
204 #define         USBF_EPN_SET_DMACNT(_c)		((_c) << 16)
205 #define         USBF_EPN_GET_DMACNT(_r)		(((_r) >> 16) & 0x1ff)
206 
207 #define     USBF_REG_EPN_READ		0x018
208 #define     USBF_REG_EPN_WRITE		0x01C
209 
210 /* AHB-EPC Bridge registers */
211 #define USBF_REG_AHBSCTR	0x1000
212 #define USBF_REG_AHBMCTR	0x1004
213 #define     USBF_SYS_WBURST_TYPE	BIT(2)
214 #define     USBF_SYS_ARBITER_CTR	BIT(31)
215 
216 #define USBF_REG_AHBBINT	0x1008
217 #define     USBF_SYS_ERR_MASTER		 (0x0F << 0)
218 #define     USBF_SYS_SBUS_ERRINT0	 BIT(4)
219 #define     USBF_SYS_SBUS_ERRINT1	 BIT(5)
220 #define     USBF_SYS_MBUS_ERRINT	 BIT(6)
221 #define     USBF_SYS_VBUS_INT		 BIT(13)
222 #define     USBF_SYS_DMA_ENDINT_EPN(_n)	 (BIT(16) << (_n)) /* _n=1..15 */
223 
224 #define USBF_REG_AHBBINTEN	0x100C
225 #define     USBF_SYS_SBUS_ERRINT0EN	  BIT(4)
226 #define     USBF_SYS_SBUS_ERRINT1EN	  BIT(5)
227 #define     USBF_SYS_MBUS_ERRINTEN	  BIT(6)
228 #define     USBF_SYS_VBUS_INTEN		  BIT(13)
229 #define     USBF_SYS_DMA_ENDINTEN_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
230 
231 #define USBF_REG_EPCTR		0x1010
232 #define     USBF_SYS_EPC_RST		BIT(0)
233 #define     USBF_SYS_PLL_RST		BIT(2)
234 #define     USBF_SYS_PLL_LOCK		BIT(4)
235 #define     USBF_SYS_PLL_RESUME		BIT(5)
236 #define     USBF_SYS_VBUS_LEVEL		BIT(8)
237 #define     USBF_SYS_DIRPD		BIT(12)
238 
239 #define USBF_REG_USBSSVER	0x1020
240 #define USBF_REG_USBSSCONF	0x1024
241 #define    USBF_SYS_DMA_AVAILABLE(_n)	(BIT(0) << (_n)) /* _n=0..15 */
242 #define    USBF_SYS_EP_AVAILABLE(_n)	(BIT(16) << (_n)) /* _n=0..15 */
243 
244 #define USBF_BASE_DMA_EPN(_n)	(0x1110 + (_n) * 0x010)
245 /* EPn DMA registers offsets from Base + USBF_BASE_DMA_EPN(n-1). n=1..15 */
246 #define     USBF_REG_DMA_EPN_DCR1	0x00
247 #define         USBF_SYS_EPN_REQEN		BIT(0)
248 #define         USBF_SYS_EPN_DIR0		BIT(1)
249 #define         USBF_SYS_EPN_SET_DMACNT(_c)	((_c) << 16)
250 #define         USBF_SYS_EPN_GET_DMACNT(_r)	(((_r) >> 16) & 0x0FF)
251 
252 #define     USBF_REG_DMA_EPN_DCR2	0x04
253 #define         USBF_SYS_EPN_MPKT(_s)		((_s) << 0)
254 #define         USBF_SYS_EPN_LMPKT(_l)		((_l) << 16)
255 
256 #define     USBF_REG_DMA_EPN_TADR	0x08
257 
258 /* USB request */
259 struct usbf_req {
260 	struct usb_request	req;
261 	struct list_head	queue;
262 	unsigned int		is_zero_sent : 1;
263 	unsigned int		is_mapped : 1;
264 	enum {
265 		USBF_XFER_START,
266 		USBF_XFER_WAIT_DMA,
267 		USBF_XFER_SEND_NULL,
268 		USBF_XFER_WAIT_END,
269 		USBF_XFER_WAIT_DMA_SHORT,
270 		USBF_XFER_WAIT_BRIDGE,
271 	}			xfer_step;
272 	size_t			dma_size;
273 };
274 
275 /* USB Endpoint */
276 struct usbf_ep {
277 	struct usb_ep		ep;
278 	char			name[32];
279 	struct list_head	queue;
280 	unsigned int		is_processing : 1;
281 	unsigned int		is_in : 1;
282 	struct			usbf_udc *udc;
283 	void __iomem		*regs;
284 	void __iomem		*dma_regs;
285 	unsigned int		id : 8;
286 	unsigned int		disabled : 1;
287 	unsigned int		is_wedged : 1;
288 	unsigned int		delayed_status : 1;
289 	u32			status;
290 	void			(*bridge_on_dma_end)(struct usbf_ep *ep);
291 };
292 
293 enum usbf_ep0state {
294 	EP0_IDLE,
295 	EP0_IN_DATA_PHASE,
296 	EP0_OUT_DATA_PHASE,
297 	EP0_OUT_STATUS_START_PHASE,
298 	EP0_OUT_STATUS_PHASE,
299 	EP0_OUT_STATUS_END_PHASE,
300 	EP0_IN_STATUS_START_PHASE,
301 	EP0_IN_STATUS_PHASE,
302 	EP0_IN_STATUS_END_PHASE,
303 };
304 
305 struct usbf_udc {
306 	struct usb_gadget		gadget;
307 	struct usb_gadget_driver	*driver;
308 	struct device			*dev;
309 	void __iomem			*regs;
310 	spinlock_t			lock;
311 	bool				is_remote_wakeup;
312 	bool				is_usb_suspended;
313 	struct usbf_ep			ep[USBF_NUM_ENDPOINTS];
314 	/* for EP0 control messages */
315 	enum usbf_ep0state		ep0state;
316 	struct usbf_req			setup_reply;
317 	u8				ep0_buf[USBF_EP0_MAX_PCKT_SIZE];
318 };
319 
320 struct usbf_ep_info {
321 	const char		*name;
322 	struct usb_ep_caps	caps;
323 	u16			base_addr;
324 	unsigned int		is_double : 1;
325 	u16			maxpacket_limit;
326 };
327 
328 #define USBF_SINGLE_BUFFER 0
329 #define USBF_DOUBLE_BUFFER 1
330 #define USBF_EP_INFO(_name, _caps, _base_addr, _is_double, _maxpacket_limit)  \
331 	{                                                                     \
332 		.name            = _name,                                     \
333 		.caps            = _caps,                                     \
334 		.base_addr       = _base_addr,                                \
335 		.is_double       = _is_double,                                \
336 		.maxpacket_limit = _maxpacket_limit,                          \
337 	}
338 
339 /* This table is computed from the recommended values provided in the SOC
340  * datasheet. The buffer type (single/double) and the endpoint type cannot
341  * be changed. The mapping in internal RAM (base_addr and number of words)
342  * for each endpoint depends on the max packet size and the buffer type.
343  */
344 static const struct usbf_ep_info usbf_ep_info[USBF_NUM_ENDPOINTS] = {
345 	/* ep0: buf @0x0000 64 bytes, fixed 32 words */
346 	[0] = USBF_EP_INFO("ep0-ctrl",
347 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
348 				       USB_EP_CAPS_DIR_ALL),
349 			   0x0000, USBF_SINGLE_BUFFER, USBF_EP0_MAX_PCKT_SIZE),
350 	/* ep1: buf @0x0020, 2 buffers 512 bytes -> (512 * 2 / 4) words */
351 	[1] = USBF_EP_INFO("ep1-bulk",
352 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
353 				       USB_EP_CAPS_DIR_ALL),
354 			   0x0020, USBF_DOUBLE_BUFFER, 512),
355 	/* ep2: buf @0x0120, 2 buffers 512 bytes -> (512 * 2 / 4) words */
356 	[2] = USBF_EP_INFO("ep2-bulk",
357 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
358 				       USB_EP_CAPS_DIR_ALL),
359 			   0x0120, USBF_DOUBLE_BUFFER, 512),
360 	/* ep3: buf @0x0220, 1 buffer 512 bytes -> (512 * 1 / 4) words */
361 	[3] = USBF_EP_INFO("ep3-bulk",
362 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
363 				       USB_EP_CAPS_DIR_ALL),
364 			   0x0220, USBF_SINGLE_BUFFER, 512),
365 	/* ep4: buf @0x02A0, 1 buffer 512 bytes -> (512 * 1 / 4) words */
366 	[4] = USBF_EP_INFO("ep4-bulk",
367 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
368 				       USB_EP_CAPS_DIR_ALL),
369 			   0x02A0, USBF_SINGLE_BUFFER, 512),
370 	/* ep5: buf @0x0320, 1 buffer 512 bytes -> (512 * 1 / 4) words */
371 	[5] = USBF_EP_INFO("ep5-bulk",
372 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
373 				       USB_EP_CAPS_DIR_ALL),
374 			   0x0320, USBF_SINGLE_BUFFER, 512),
375 	/* ep6: buf @0x03A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
376 	[6] = USBF_EP_INFO("ep6-int",
377 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
378 				       USB_EP_CAPS_DIR_ALL),
379 			   0x03A0, USBF_SINGLE_BUFFER, 1024),
380 	/* ep7: buf @0x04A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
381 	[7] = USBF_EP_INFO("ep7-int",
382 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
383 				       USB_EP_CAPS_DIR_ALL),
384 			   0x04A0, USBF_SINGLE_BUFFER, 1024),
385 	/* ep8: buf @0x0520, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
386 	[8] = USBF_EP_INFO("ep8-int",
387 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
388 				       USB_EP_CAPS_DIR_ALL),
389 			   0x0520, USBF_SINGLE_BUFFER, 1024),
390 	/* ep9: buf @0x0620, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
391 	[9] = USBF_EP_INFO("ep9-int",
392 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
393 				       USB_EP_CAPS_DIR_ALL),
394 			   0x0620, USBF_SINGLE_BUFFER, 1024),
395 	/* ep10: buf @0x0720, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
396 	[10] = USBF_EP_INFO("ep10-iso",
397 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
398 					USB_EP_CAPS_DIR_ALL),
399 			    0x0720, USBF_DOUBLE_BUFFER, 1024),
400 	/* ep11: buf @0x0920, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
401 	[11] = USBF_EP_INFO("ep11-iso",
402 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
403 					USB_EP_CAPS_DIR_ALL),
404 			    0x0920, USBF_DOUBLE_BUFFER, 1024),
405 	/* ep12: buf @0x0B20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
406 	[12] = USBF_EP_INFO("ep12-iso",
407 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
408 					USB_EP_CAPS_DIR_ALL),
409 			    0x0B20, USBF_DOUBLE_BUFFER, 1024),
410 	/* ep13: buf @0x0D20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
411 	[13] = USBF_EP_INFO("ep13-iso",
412 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
413 					USB_EP_CAPS_DIR_ALL),
414 			    0x0D20, USBF_DOUBLE_BUFFER, 1024),
415 	/* ep14: buf @0x0F20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
416 	[14] = USBF_EP_INFO("ep14-iso",
417 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
418 					USB_EP_CAPS_DIR_ALL),
419 			    0x0F20, USBF_DOUBLE_BUFFER, 1024),
420 	/* ep15: buf @0x1120, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
421 	[15] = USBF_EP_INFO("ep15-iso",
422 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
423 					USB_EP_CAPS_DIR_ALL),
424 			    0x1120, USBF_DOUBLE_BUFFER, 1024),
425 };
426 
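/* Accessors for the controller global registers (EPC core and AHB-EPC bridge) */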
427 static inline u32 usbf_reg_readl(struct usbf_udc *udc, uint offset)
428 {
429 	return readl(udc->regs + offset);
430 }
431 
432 static inline void usbf_reg_writel(struct usbf_udc *udc, uint offset, u32 val)
433 {
434 	writel(val, udc->regs + offset);
435 }
436 
437 static inline void usbf_reg_bitset(struct usbf_udc *udc, uint offset, u32 set)
438 {
439 	u32 tmp;
440 
441 	tmp = usbf_reg_readl(udc, offset);
442 	tmp |= set;
443 	usbf_reg_writel(udc, offset, tmp);
444 }
445 
446 static inline void usbf_reg_bitclr(struct usbf_udc *udc, uint offset, u32 clr)
447 {
448 	u32 tmp;
449 
450 	tmp = usbf_reg_readl(udc, offset);
451 	tmp &= ~clr;
452 	usbf_reg_writel(udc, offset, tmp);
453 }
454 
455 static inline void usbf_reg_clrset(struct usbf_udc *udc, uint offset,
456 				   u32 clr, u32 set)
457 {
458 	u32 tmp;
459 
460 	tmp = usbf_reg_readl(udc, offset);
461 	tmp &= ~clr;
462 	tmp |= set;
463 	usbf_reg_writel(udc, offset, tmp);
464 }
465 
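/* Accessors for the per-endpoint register area pointed to by ep->regs */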
466 static inline u32 usbf_ep_reg_readl(struct usbf_ep *ep, uint offset)
467 {
468 	return readl(ep->regs + offset);
469 }
470 
471 static inline void usbf_ep_reg_read_rep(struct usbf_ep *ep, uint offset,
472 				       void *dst, uint count)
473 {
474 	readsl(ep->regs + offset, dst, count);
475 }
476 
477 static inline void usbf_ep_reg_writel(struct usbf_ep *ep, uint offset, u32 val)
478 {
479 	writel(val, ep->regs + offset);
480 }
481 
482 static inline void usbf_ep_reg_write_rep(struct usbf_ep *ep, uint offset,
483 					 const void *src, uint count)
484 {
485 	writesl(ep->regs + offset, src, count);
486 }
487 
488 static inline void usbf_ep_reg_bitset(struct usbf_ep *ep, uint offset, u32 set)
489 {
490 	u32 tmp;
491 
492 	tmp = usbf_ep_reg_readl(ep, offset);
493 	tmp |= set;
494 	usbf_ep_reg_writel(ep, offset, tmp);
495 }
496 
497 static inline void usbf_ep_reg_bitclr(struct usbf_ep *ep, uint offset, u32 clr)
498 {
499 	u32 tmp;
500 
501 	tmp = usbf_ep_reg_readl(ep, offset);
502 	tmp &= ~clr;
503 	usbf_ep_reg_writel(ep, offset, tmp);
504 }
505 
506 static inline void usbf_ep_reg_clrset(struct usbf_ep *ep, uint offset,
507 				      u32 clr, u32 set)
508 {
509 	u32 tmp;
510 
511 	tmp = usbf_ep_reg_readl(ep, offset);
512 	tmp &= ~clr;
513 	tmp |= set;
514 	usbf_ep_reg_writel(ep, offset, tmp);
515 }
516 
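/* Accessors for the per-endpoint DMA registers located in the AHB-EPC bridge */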
517 static inline u32 usbf_ep_dma_reg_readl(struct usbf_ep *ep, uint offset)
518 {
519 	return readl(ep->dma_regs + offset);
520 }
521 
522 static inline void usbf_ep_dma_reg_writel(struct usbf_ep *ep, uint offset,
523 					  u32 val)
524 {
525 	writel(val, ep->dma_regs + offset);
526 }
527 
528 static inline void usbf_ep_dma_reg_bitset(struct usbf_ep *ep, uint offset,
529 					  u32 set)
530 {
531 	u32 tmp;
532 
533 	tmp = usbf_ep_dma_reg_readl(ep, offset);
534 	tmp |= set;
535 	usbf_ep_dma_reg_writel(ep, offset, tmp);
536 }
537 
538 static inline void usbf_ep_dma_reg_bitclr(struct usbf_ep *ep, uint offset,
539 					  u32 clr)
540 {
541 	u32 tmp;
542 
543 	tmp = usbf_ep_dma_reg_readl(ep, offset);
544 	tmp &= ~clr;
545 	usbf_ep_dma_reg_writel(ep, offset, tmp);
546 }
547 
548 static inline void usbf_ep_dma_reg_clrset(struct usbf_ep *ep, uint offset,
549 					  u32 clr, u32 set)
550 {
551 	u32 tmp;
552 
553 	tmp = usbf_ep_dma_reg_readl(ep, offset);
554 	tmp &= ~clr;
555 	tmp |= set;
556 	usbf_ep_dma_reg_writel(ep, offset, tmp);
557 }
558 
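/* Send a zero-length packet on EP0 by asserting DEND with an empty FIFO.
 * When is_data1 is set, PIDCLR is also asserted to reset the data PID.
 */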
559 static void usbf_ep0_send_null(struct usbf_ep *ep0, bool is_data1)
560 {
561 	u32 set;
562 
563 	set = USBF_EP0_DEND;
564 	if (is_data1)
565 		set |= USBF_EP0_PIDCLR;
566 
567 	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, set);
568 }
569 
570 static int usbf_ep0_pio_in(struct usbf_ep *ep0, struct usbf_req *req)
571 {
572 	unsigned int left;
573 	unsigned int nb;
574 	const void *buf;
575 	u32 ctrl;
576 	u32 last;
577 
578 	left = req->req.length - req->req.actual;
579 
580 	if (left == 0) {
581 		if (!req->is_zero_sent) {
582 			if (req->req.length == 0) {
583 				dev_dbg(ep0->udc->dev, "ep0 send null\n");
584 				usbf_ep0_send_null(ep0, false);
585 				req->is_zero_sent = 1;
586 				return -EINPROGRESS;
587 			}
588 			if ((req->req.actual % ep0->ep.maxpacket) == 0) {
589 				if (req->req.zero) {
590 					dev_dbg(ep0->udc->dev, "ep0 send null\n");
591 					usbf_ep0_send_null(ep0, false);
592 					req->is_zero_sent = 1;
593 					return -EINPROGRESS;
594 				}
595 			}
596 		}
597 		return 0;
598 	}
599 
600 	if (left > ep0->ep.maxpacket)
601 		left = ep0->ep.maxpacket;
602 
603 	buf = req->req.buf;
604 	buf += req->req.actual;
605 
606 	nb = left / sizeof(u32);
607 	if (nb) {
608 		usbf_ep_reg_write_rep(ep0, USBF_REG_EP0_WRITE, buf, nb);
609 		buf += (nb * sizeof(u32));
610 		req->req.actual += (nb * sizeof(u32));
611 		left -= (nb * sizeof(u32));
612 	}
613 	ctrl = usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL);
614 	ctrl &= ~USBF_EP0_DW_MASK;
615 	if (left) {
616 		memcpy(&last, buf, left);
617 		usbf_ep_reg_writel(ep0, USBF_REG_EP0_WRITE, last);
618 		ctrl |= USBF_EP0_DW(left);
619 		req->req.actual += left;
620 	}
621 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, ctrl | USBF_EP0_DEND);
622 
623 	dev_dbg(ep0->udc->dev, "ep0 send %u/%u\n",
624 		req->req.actual, req->req.length);
625 
626 	return -EINPROGRESS;
627 }
628 
629 static int usbf_ep0_pio_out(struct usbf_ep *ep0, struct usbf_req *req)
630 {
631 	int req_status = 0;
632 	unsigned int count;
633 	unsigned int recv;
634 	unsigned int left;
635 	unsigned int nb;
636 	void *buf;
637 	u32 last;
638 
639 	if (ep0->status & USBF_EP0_OUT_INT) {
640 		recv = usbf_ep_reg_readl(ep0, USBF_REG_EP0_LENGTH) & USBF_EP0_LDATA;
641 		count = recv;
642 
643 		buf = req->req.buf;
644 		buf += req->req.actual;
645 
646 		left = req->req.length - req->req.actual;
647 
648 		dev_dbg(ep0->udc->dev, "ep0 recv %u, left %u\n", count, left);
649 
650 		if (left > ep0->ep.maxpacket)
651 			left = ep0->ep.maxpacket;
652 
653 		if (count > left) {
654 			req_status = -EOVERFLOW;
655 			count = left;
656 		}
657 
658 		if (count) {
659 			nb = count / sizeof(u32);
660 			if (nb) {
661 				usbf_ep_reg_read_rep(ep0, USBF_REG_EP0_READ,
662 					buf, nb);
663 				buf += (nb * sizeof(u32));
664 				req->req.actual += (nb * sizeof(u32));
665 				count -= (nb * sizeof(u32));
666 			}
667 			if (count) {
668 				last = usbf_ep_reg_readl(ep0, USBF_REG_EP0_READ);
669 				memcpy(buf, &last, count);
670 				req->req.actual += count;
671 			}
672 		}
673 		dev_dbg(ep0->udc->dev, "ep0 recv %u/%u\n",
674 			req->req.actual, req->req.length);
675 
676 		if (req_status) {
677 			dev_dbg(ep0->udc->dev, "ep0 req.status=%d\n", req_status);
678 			req->req.status = req_status;
679 			return 0;
680 		}
681 
682 		if (recv < ep0->ep.maxpacket) {
683 			dev_dbg(ep0->udc->dev, "ep0 short packet\n");
684 			/* This is a short packet -> It is the end */
685 			req->req.status = 0;
686 			return 0;
687 		}
688 
689 		/* The Data stage of a control transfer from an endpoint to the
690 		 * host is complete when the endpoint does one of the following:
691 		 * - Has transferred exactly the expected amount of data
692 		 * - Transfers a packet with a payload size less than
693 		 *   wMaxPacketSize or transfers a zero-length packet
694 		 */
695 		if (req->req.actual == req->req.length) {
696 			req->req.status = 0;
697 			return 0;
698 		}
699 	}
700 
701 	if (ep0->status & USBF_EP0_OUT_NULL_INT) {
702 		/* NULL packet received */
703 		dev_dbg(ep0->udc->dev, "ep0 null packet\n");
704 		if (req->req.actual != req->req.length) {
705 			req->req.status = req->req.short_not_ok ?
706 					  -EREMOTEIO : 0;
707 		} else {
708 			req->req.status = 0;
709 		}
710 		return 0;
711 	}
712 
713 	return -EINPROGRESS;
714 }
715 
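/* Clear the EP0 FIFO (BCLR) and wait until the IN side reports empty */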
716 static void usbf_ep0_fifo_flush(struct usbf_ep *ep0)
717 {
718 	u32 sts;
719 	int ret;
720 
721 	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_BCLR);
722 
723 	ret = readl_poll_timeout_atomic(ep0->regs + USBF_REG_EP0_STATUS, sts,
724 		(sts & (USBF_EP0_IN_DATA | USBF_EP0_IN_EMPTY)) == USBF_EP0_IN_EMPTY,
725 		0,  10000);
726 	if (ret)
727 		dev_err(ep0->udc->dev, "ep0 flush fifo timed out\n");
728 
729 }
730 
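/* Send a zero-length packet on EPn by asserting DEND with an empty FIFO */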
731 static void usbf_epn_send_null(struct usbf_ep *epn)
732 {
733 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_DEND);
734 }
735 
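/* Write the last 1 to 3 bytes of a transfer into the EPn FIFO and set the
 * number of valid bytes (DW field) before asserting DEND.
 */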
736 static void usbf_epn_send_residue(struct usbf_ep *epn, const void *buf,
737 				  unsigned int size)
738 {
739 	u32 tmp;
740 
741 	memcpy(&tmp, buf, size);
742 	usbf_ep_reg_writel(epn, USBF_REG_EPN_WRITE, tmp);
743 
744 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
745 				USBF_EPN_DW_MASK,
746 				USBF_EPN_DW(size) | USBF_EPN_DEND);
747 }
748 
749 static int usbf_epn_pio_in(struct usbf_ep *epn, struct usbf_req *req)
750 {
751 	unsigned int left;
752 	unsigned int nb;
753 	const void *buf;
754 
755 	left = req->req.length - req->req.actual;
756 
757 	if (left == 0) {
758 		if (!req->is_zero_sent) {
759 			if (req->req.length == 0) {
760 				dev_dbg(epn->udc->dev, "ep%u send_null\n", epn->id);
761 				usbf_epn_send_null(epn);
762 				req->is_zero_sent = 1;
763 				return -EINPROGRESS;
764 			}
765 			if ((req->req.actual % epn->ep.maxpacket) == 0) {
766 				if (req->req.zero) {
767 					dev_dbg(epn->udc->dev, "ep%u send_null\n",
768 						epn->id);
769 					usbf_epn_send_null(epn);
770 					req->is_zero_sent = 1;
771 					return -EINPROGRESS;
772 				}
773 			}
774 		}
775 		return 0;
776 	}
777 
778 	if (left > epn->ep.maxpacket)
779 		left = epn->ep.maxpacket;
780 
781 	buf = req->req.buf;
782 	buf += req->req.actual;
783 
784 	nb = left / sizeof(u32);
785 	if (nb) {
786 		usbf_ep_reg_write_rep(epn, USBF_REG_EPN_WRITE, buf, nb);
787 		buf += (nb * sizeof(u32));
788 		req->req.actual += (nb * sizeof(u32));
789 		left -= (nb * sizeof(u32));
790 	}
791 
792 	if (left) {
793 		usbf_epn_send_residue(epn, buf, left);
794 		req->req.actual += left;
795 	} else {
796 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
797 					USBF_EPN_DW_MASK,
798 					USBF_EPN_DEND);
799 	}
800 
801 	dev_dbg(epn->udc->dev, "ep%u send %u/%u\n", epn->id, req->req.actual,
802 		req->req.length);
803 
804 	return -EINPROGRESS;
805 }
806 
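/* Used as the bridge_on_dma_end callback: re-enable the EPn IN_END interrupt
 * once the bridge has signaled the end of the DMA transfer.
 */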
807 static void usbf_epn_enable_in_end_int(struct usbf_ep *epn)
808 {
809 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_END_EN);
810 }
811 
812 static int usbf_epn_dma_in(struct usbf_ep *epn, struct usbf_req *req)
813 {
814 	unsigned int left;
815 	u32 npkt;
816 	u32 lastpkt;
817 	int ret;
818 
819 	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
820 		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
821 			epn->id);
822 		return usbf_epn_pio_in(epn, req);
823 	}
824 
825 	left = req->req.length - req->req.actual;
826 
827 	switch (req->xfer_step) {
828 	default:
829 	case USBF_XFER_START:
830 		if (left == 0) {
831 			dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
832 			usbf_epn_send_null(epn);
833 			req->xfer_step = USBF_XFER_WAIT_END;
834 			break;
835 		}
836 		if (left < 4) {
837 			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
838 				left);
839 			usbf_epn_send_residue(epn,
840 				req->req.buf + req->req.actual, left);
841 			req->req.actual += left;
842 			req->xfer_step = USBF_XFER_WAIT_END;
843 			break;
844 		}
845 
846 		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 1);
847 		if (ret < 0) {
848 			dev_err(epn->udc->dev, "usb_gadget_map_request failed (%d)\n",
849 				ret);
850 			return ret;
851 		}
852 		req->is_mapped = 1;
853 
854 		npkt = DIV_ROUND_UP(left, epn->ep.maxpacket);
855 		lastpkt = (left % epn->ep.maxpacket);
856 		if (lastpkt == 0)
857 			lastpkt = epn->ep.maxpacket;
858 		lastpkt &= ~0x3; /* DMA is done on 32bit units */
859 
860 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2,
861 			USBF_SYS_EPN_MPKT(epn->ep.maxpacket) | USBF_SYS_EPN_LMPKT(lastpkt));
862 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR,
863 			req->req.dma);
864 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
865 			USBF_SYS_EPN_SET_DMACNT(npkt));
866 		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
867 			USBF_SYS_EPN_REQEN);
868 
869 		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT, USBF_EPN_SET_DMACNT(npkt));
870 
871 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
872 
873 		/* The end of DMA transfer at the USBF level needs to be handled
874 		 * after the detection of the end of DMA transfer at the bridge
875 		 * level.
876 		 * To force this sequence, EPN_IN_END_EN will be set by the
877 		 * detection of the end of transfer at the bridge level (i.e. the
878 		 * bridge interrupt).
879 		 */
880 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
881 			USBF_EPN_IN_EN | USBF_EPN_IN_END_EN);
882 		epn->bridge_on_dma_end = usbf_epn_enable_in_end_int;
883 
884 		/* Clear any pending IN_END interrupt */
885 		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_END_INT);
886 
887 		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
888 			USBF_EPN_BURST_SET | USBF_EPN_DMAMODE0);
889 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
890 			USBF_EPN_DMA_EN);
891 
892 		req->dma_size = (npkt - 1) * epn->ep.maxpacket + lastpkt;
893 
894 		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu\n", epn->id,
895 			req->dma_size);
896 
897 		req->xfer_step = USBF_XFER_WAIT_DMA;
898 		break;
899 
900 	case USBF_XFER_WAIT_DMA:
901 		if (!(epn->status & USBF_EPN_IN_END_INT)) {
902 			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
903 			break;
904 		}
905 		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
906 
907 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 1);
908 		req->is_mapped = 0;
909 
910 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
911 
912 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
913 			USBF_EPN_IN_END_EN,
914 			USBF_EPN_IN_EN);
915 
916 		req->req.actual += req->dma_size;
917 
918 		left = req->req.length - req->req.actual;
919 		if (left) {
920 			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_INT);
921 
922 			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
923 				left);
924 			usbf_epn_send_residue(epn,
925 				req->req.buf + req->req.actual, left);
926 			req->req.actual += left;
927 			req->xfer_step = USBF_XFER_WAIT_END;
928 			break;
929 		}
930 
931 		if (req->req.actual % epn->ep.maxpacket) {
932 			/* last packet was a short packet. Tell the hardware to
933 			 * send it right now.
934 			 */
935 			dev_dbg(epn->udc->dev, "ep%u send short\n", epn->id);
936 			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
937 				~(u32)USBF_EPN_IN_INT);
938 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
939 				USBF_EPN_DEND);
940 
941 			req->xfer_step = USBF_XFER_WAIT_END;
942 			break;
943 		}
944 
945 		/* The last packet was a full maxpacket-sized packet.
946 		 * Send a null packet if needed.
947 		 */
948 		if (req->req.zero) {
949 			req->xfer_step = USBF_XFER_SEND_NULL;
950 			break;
951 		}
952 
953 		/* No more action to do. Wait for the end of the USB transfer */
954 		req->xfer_step = USBF_XFER_WAIT_END;
955 		break;
956 
957 	case USBF_XFER_SEND_NULL:
958 		dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
959 		usbf_epn_send_null(epn);
960 		req->xfer_step = USBF_XFER_WAIT_END;
961 		break;
962 
963 	case USBF_XFER_WAIT_END:
964 		if (!(epn->status & USBF_EPN_IN_INT)) {
965 			dev_dbg(epn->udc->dev, "ep%u end not done\n", epn->id);
966 			break;
967 		}
968 		dev_dbg(epn->udc->dev, "ep%u send done %u/%u\n", epn->id,
969 			req->req.actual, req->req.length);
970 		req->xfer_step = USBF_XFER_START;
971 		return 0;
972 	}
973 
974 	return -EINPROGRESS;
975 }
976 
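/* Read one last word from the EPn FIFO and keep only the remaining valid bytes */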
977 static void usbf_epn_recv_residue(struct usbf_ep *epn, void *buf,
978 				  unsigned int size)
979 {
980 	u32 last;
981 
982 	last = usbf_ep_reg_readl(epn, USBF_REG_EPN_READ);
983 	memcpy(buf, &last, size);
984 }
985 
986 static int usbf_epn_pio_out(struct usbf_ep *epn, struct usbf_req *req)
987 {
988 	int req_status = 0;
989 	unsigned int count;
990 	unsigned int recv;
991 	unsigned int left;
992 	unsigned int nb;
993 	void *buf;
994 
995 	if (epn->status & USBF_EPN_OUT_INT) {
996 		recv = USBF_EPN_GET_LDATA(
997 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
998 		count = recv;
999 
1000 		buf = req->req.buf;
1001 		buf += req->req.actual;
1002 
1003 		left = req->req.length - req->req.actual;
1004 
1005 		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
1006 			recv, left, epn->ep.maxpacket);
1007 
1008 		if (left > epn->ep.maxpacket)
1009 			left = epn->ep.maxpacket;
1010 
1011 		if (count > left) {
1012 			req_status = -EOVERFLOW;
1013 			count = left;
1014 		}
1015 
1016 		if (count) {
1017 			nb = count / sizeof(u32);
1018 			if (nb) {
1019 				usbf_ep_reg_read_rep(epn, USBF_REG_EPN_READ,
1020 					buf, nb);
1021 				buf += (nb * sizeof(u32));
1022 				req->req.actual += (nb * sizeof(u32));
1023 				count -= (nb * sizeof(u32));
1024 			}
1025 			if (count) {
1026 				usbf_epn_recv_residue(epn, buf, count);
1027 				req->req.actual += count;
1028 			}
1029 		}
1030 		dev_dbg(epn->udc->dev, "ep%u recv %u/%u\n", epn->id,
1031 			req->req.actual, req->req.length);
1032 
1033 		if (req_status) {
1034 			dev_dbg(epn->udc->dev, "ep%u req.status=%d\n", epn->id,
1035 				req_status);
1036 			req->req.status = req_status;
1037 			return 0;
1038 		}
1039 
1040 		if (recv < epn->ep.maxpacket) {
1041 			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
1042 			/* This is a short packet -> It is the end */
1043 			req->req.status = 0;
1044 			return 0;
1045 		}
1046 
1047 		/* Request full -> complete */
1048 		if (req->req.actual == req->req.length) {
1049 			req->req.status = 0;
1050 			return 0;
1051 		}
1052 	}
1053 
1054 	if (epn->status & USBF_EPN_OUT_NULL_INT) {
1055 		/* NULL packet received */
1056 		dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
1057 		if (req->req.actual != req->req.length) {
1058 			req->req.status = req->req.short_not_ok ?
1059 					  -EREMOTEIO : 0;
1060 		} else {
1061 			req->req.status = 0;
1062 		}
1063 		return 0;
1064 	}
1065 
1066 	return -EINPROGRESS;
1067 }
1068 
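/* Used as the bridge_on_dma_end callback: re-enable the EPn OUT_END interrupt
 * once the bridge has signaled the end of the DMA transfer.
 */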
1069 static void usbf_epn_enable_out_end_int(struct usbf_ep *epn)
1070 {
1071 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_OUT_END_EN);
1072 }
1073 
1074 static void usbf_epn_process_queue(struct usbf_ep *epn);
1075 
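/* Program the bridge and EPn DMA registers to receive npkt max-size packets,
 * or the word-aligned part of a single short packet when is_short is set.
 */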
1076 static void usbf_epn_dma_out_send_dma(struct usbf_ep *epn, dma_addr_t addr, u32 npkt, bool is_short)
1077 {
1078 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2, USBF_SYS_EPN_MPKT(epn->ep.maxpacket));
1079 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR, addr);
1080 
1081 	if (is_short) {
1082 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
1083 				USBF_SYS_EPN_SET_DMACNT(1) | USBF_SYS_EPN_DIR0);
1084 		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
1085 				USBF_SYS_EPN_REQEN);
1086 
1087 		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
1088 				USBF_EPN_SET_DMACNT(0));
1089 
1090 		/* The end of DMA transfer at the USBF level needs to be handled
1091 		 * after the detection of the end of DMA transfer at the bridge
1092 		 * level.
1093 		 * To force this sequence, enabling the OUT_END interrupt will
1094 		 * be done by the detection of the end of transfer at the bridge
1095 		 * level (i.e. the bridge interrupt).
1096 		 */
1097 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1098 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN | USBF_EPN_OUT_END_EN);
1099 		epn->bridge_on_dma_end = usbf_epn_enable_out_end_int;
1100 
1101 		/* Clear any pending OUT_END interrupt */
1102 		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1103 			~(u32)USBF_EPN_OUT_END_INT);
1104 
1105 		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
1106 			USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0);
1107 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
1108 			USBF_EPN_DMA_EN);
1109 		return;
1110 	}
1111 
1112 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
1113 		USBF_SYS_EPN_SET_DMACNT(npkt) | USBF_SYS_EPN_DIR0);
1114 	usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
1115 		USBF_SYS_EPN_REQEN);
1116 
1117 	usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
1118 		USBF_EPN_SET_DMACNT(npkt));
1119 
1120 	/* Here, the bridge may or may not generate an interrupt to signal the
1121 	 * end of DMA transfer.
1122 	 * Keep only the OUT_END interrupt and handle the bridge later during
1123 	 * OUT_END processing.
1124 	 */
1125 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1126 		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN,
1127 		USBF_EPN_OUT_END_EN);
1128 
1129 	/* Disable the bridge interrupt. It will be re-enabled later */
1130 	usbf_reg_bitclr(epn->udc, USBF_REG_AHBBINTEN,
1131 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1132 
1133 	/* Clear any pending DMA_END interrupt at bridge level */
1134 	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT,
1135 		USBF_SYS_DMA_ENDINT_EPN(epn->id));
1136 
1137 	/* Clear any pending OUT_END interrupt */
1138 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1139 		~(u32)USBF_EPN_OUT_END_INT);
1140 
1141 	usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
1142 		USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0 | USBF_EPN_BURST_SET);
1143 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
1144 		USBF_EPN_DMA_EN);
1145 }
1146 
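/* Complete an OUT DMA transfer: restore the interrupt mask and return the
 * number of bytes the DMA did not transfer (always 0 for a short-packet DMA).
 */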
1147 static size_t usbf_epn_dma_out_complete_dma(struct usbf_ep *epn, bool is_short)
1148 {
1149 	u32 dmacnt;
1150 	u32 tmp;
1151 	int ret;
1152 
1153 	/* Restore interrupt mask */
1154 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1155 		USBF_EPN_OUT_END_EN,
1156 		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1157 
1158 	if (is_short) {
1159 		/* Nothing more to do when the DMA was for a short packet */
1160 		return 0;
1161 	}
1162 
1163 	/* Enable the bridge interrupt */
1164 	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
1165 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1166 
1167 	tmp = usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT);
1168 	dmacnt = USBF_EPN_GET_DMACNT(tmp);
1169 
1170 	if (dmacnt) {
1171 		/* Some packets were not received (the transfer was halted by a
1172 		 * short or a null packet).
1173 		 * The bridge never raises an interrupt in this case.
1174 		 * Wait for the end of the transfer at the bridge level.
1175 		 */
1176 		ret = readl_poll_timeout_atomic(
1177 			epn->dma_regs + USBF_REG_DMA_EPN_DCR1,
1178 			tmp, (USBF_SYS_EPN_GET_DMACNT(tmp) == dmacnt),
1179 			0,  10000);
1180 		if (ret) {
1181 			dev_err(epn->udc->dev, "ep%u wait bridge timed out\n",
1182 				epn->id);
1183 		}
1184 
1185 		usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1,
1186 			USBF_SYS_EPN_REQEN);
1187 
1188 		/* The dmacnt value tells how many packets were not transferred
1189 		 * out of the maximum number of packets we set for the DMA transfer.
1190 		 * Compute the remaining DMA size based on this value.
1191 		 */
1192 		return dmacnt * epn->ep.maxpacket;
1193 	}
1194 
1195 	return 0;
1196 }
1197 
1198 static int usbf_epn_dma_out(struct usbf_ep *epn, struct usbf_req *req)
1199 {
1200 	unsigned int dma_left;
1201 	unsigned int count;
1202 	unsigned int recv;
1203 	unsigned int left;
1204 	u32 npkt;
1205 	int ret;
1206 
1207 	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
1208 		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
1209 			epn->id);
1210 		return usbf_epn_pio_out(epn, req);
1211 	}
1212 
1213 	switch (req->xfer_step) {
1214 	default:
1215 	case USBF_XFER_START:
1216 		if (epn->status & USBF_EPN_OUT_NULL_INT) {
1217 			dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
1218 			if (req->req.actual != req->req.length) {
1219 				req->req.status = req->req.short_not_ok ?
1220 					-EREMOTEIO : 0;
1221 			} else {
1222 				req->req.status = 0;
1223 			}
1224 			return 0;
1225 		}
1226 
1227 		if (!(epn->status & USBF_EPN_OUT_INT)) {
1228 			dev_dbg(epn->udc->dev, "ep%u OUT_INT not set -> spurious\n",
1229 				epn->id);
1230 			break;
1231 		}
1232 
1233 		recv = USBF_EPN_GET_LDATA(
1234 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1235 		if (!recv) {
1236 			dev_dbg(epn->udc->dev, "ep%u recv = 0 -> spurious\n",
1237 				epn->id);
1238 			break;
1239 		}
1240 
1241 		left = req->req.length - req->req.actual;
1242 
1243 		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
1244 			recv, left, epn->ep.maxpacket);
1245 
1246 		if (recv > left) {
1247 			dev_err(epn->udc->dev, "ep%u overflow (%u/%u)\n",
1248 				epn->id, recv, left);
1249 			req->req.status = -EOVERFLOW;
1250 			return -EOVERFLOW;
1251 		}
1252 
1253 		if (recv < epn->ep.maxpacket) {
1254 			/* Short packet received */
1255 			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
1256 			if (recv <= 3) {
1257 				usbf_epn_recv_residue(epn,
1258 					req->req.buf + req->req.actual, recv);
1259 				req->req.actual += recv;
1260 
1261 				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
1262 					epn->id, req->req.actual, req->req.length);
1263 
1264 				req->xfer_step = USBF_XFER_START;
1265 				return 0;
1266 			}
1267 
1268 			ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
1269 			if (ret < 0) {
1270 				dev_err(epn->udc->dev, "map request failed (%d)\n",
1271 					ret);
1272 				return ret;
1273 			}
1274 			req->is_mapped = 1;
1275 
1276 			usbf_epn_dma_out_send_dma(epn,
1277 				req->req.dma + req->req.actual,
1278 				1, true);
1279 			req->dma_size = recv & ~0x3;
1280 
1281 			dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n", epn->id,
1282 				req->dma_size);
1283 
1284 			req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
1285 			break;
1286 		}
1287 
1288 		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
1289 		if (ret < 0) {
1290 			dev_err(epn->udc->dev, "map request failed (%d)\n",
1291 				ret);
1292 			return ret;
1293 		}
1294 		req->is_mapped = 1;
1295 
1296 		/* Use the maximum DMA size according to the request buffer.
1297 		 * We will adjust the received size later at the end of the DMA
1298 		 * transfer with the left size computed from
1299 		 * usbf_epn_dma_out_complete_dma().
1300 		 */
1301 		npkt = left / epn->ep.maxpacket;
1302 		usbf_epn_dma_out_send_dma(epn,
1303 				req->req.dma + req->req.actual,
1304 				npkt, false);
1305 		req->dma_size = npkt * epn->ep.maxpacket;
1306 
1307 		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu (%u)\n", epn->id,
1308 			req->dma_size, npkt);
1309 
1310 		req->xfer_step = USBF_XFER_WAIT_DMA;
1311 		break;
1312 
1313 	case USBF_XFER_WAIT_DMA_SHORT:
1314 		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
1315 			dev_dbg(epn->udc->dev, "ep%u dma short not done\n", epn->id);
1316 			break;
1317 		}
1318 		dev_dbg(epn->udc->dev, "ep%u dma short done\n", epn->id);
1319 
1320 		usbf_epn_dma_out_complete_dma(epn, true);
1321 
1322 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1323 		req->is_mapped = 0;
1324 
1325 		req->req.actual += req->dma_size;
1326 
1327 		recv = USBF_EPN_GET_LDATA(
1328 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1329 
1330 		count = recv & 0x3;
1331 		if (count) {
1332 			dev_dbg(epn->udc->dev, "ep%u recv residue %u\n", epn->id,
1333 				count);
1334 			usbf_epn_recv_residue(epn,
1335 				req->req.buf + req->req.actual, count);
1336 			req->req.actual += count;
1337 		}
1338 
1339 		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1340 			req->req.actual, req->req.length);
1341 
1342 		req->xfer_step = USBF_XFER_START;
1343 		return 0;
1344 
1345 	case USBF_XFER_WAIT_DMA:
1346 		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
1347 			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
1348 			break;
1349 		}
1350 		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
1351 
1352 		dma_left = usbf_epn_dma_out_complete_dma(epn, false);
1353 		if (dma_left) {
1354 			/* Adjust the final DMA size with the amount the DMA left untransferred */
1355 			count = req->dma_size - dma_left;
1356 
1357 			dev_dbg(epn->udc->dev, "ep%u dma xfer done %u\n", epn->id,
1358 				count);
1359 
1360 			req->req.actual += count;
1361 
1362 			if (epn->status & USBF_EPN_OUT_NULL_INT) {
1363 				/* DMA was stopped by a null packet reception */
1364 				dev_dbg(epn->udc->dev, "ep%u dma stopped by null pckt\n",
1365 					epn->id);
1366 				usb_gadget_unmap_request(&epn->udc->gadget,
1367 							 &req->req, 0);
1368 				req->is_mapped = 0;
1369 
1370 				usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1371 					~(u32)USBF_EPN_OUT_NULL_INT);
1372 
1373 				if (req->req.actual != req->req.length) {
1374 					req->req.status = req->req.short_not_ok ?
1375 						  -EREMOTEIO : 0;
1376 				} else {
1377 					req->req.status = 0;
1378 				}
1379 				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
1380 					epn->id, req->req.actual, req->req.length);
1381 				req->xfer_step = USBF_XFER_START;
1382 				return 0;
1383 			}
1384 
1385 			recv = USBF_EPN_GET_LDATA(
1386 				usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1387 			left = req->req.length - req->req.actual;
1388 			if (recv > left) {
1389 				dev_err(epn->udc->dev,
1390 					"ep%u overflow (%u/%u)\n", epn->id,
1391 					recv, left);
1392 				req->req.status = -EOVERFLOW;
1393 				usb_gadget_unmap_request(&epn->udc->gadget,
1394 							 &req->req, 0);
1395 				req->is_mapped = 0;
1396 
1397 				req->xfer_step = USBF_XFER_START;
1398 				return -EOVERFLOW;
1399 			}
1400 
1401 			if (recv > 3) {
1402 				usbf_epn_dma_out_send_dma(epn,
1403 					req->req.dma + req->req.actual,
1404 					1, true);
1405 				req->dma_size = recv & ~0x3;
1406 
1407 				dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n",
1408 					epn->id, req->dma_size);
1409 
1410 				req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
1411 				break;
1412 			}
1413 
1414 			usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1415 			req->is_mapped = 0;
1416 
1417 			count = recv & 0x3;
1418 			if (count) {
1419 				dev_dbg(epn->udc->dev, "ep%u recv residue %u\n",
1420 					epn->id, count);
1421 				usbf_epn_recv_residue(epn,
1422 					req->req.buf + req->req.actual, count);
1423 				req->req.actual += count;
1424 			}
1425 
1426 			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1427 				req->req.actual, req->req.length);
1428 
1429 			req->xfer_step = USBF_XFER_START;
1430 			return 0;
1431 		}
1432 
1433 		/* Process queue at bridge interrupt only */
1434 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1435 			USBF_EPN_OUT_END_EN | USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1436 		epn->status = 0;
1437 		epn->bridge_on_dma_end = usbf_epn_process_queue;
1438 
1439 		req->xfer_step = USBF_XFER_WAIT_BRIDGE;
1440 		break;
1441 
1442 	case USBF_XFER_WAIT_BRIDGE:
1443 		dev_dbg(epn->udc->dev, "ep%u bridge transfers done\n", epn->id);
1444 
1445 		/* Restore interrupt mask */
1446 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1447 			USBF_EPN_OUT_END_EN,
1448 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1449 
1450 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1451 		req->is_mapped = 0;
1452 
1453 		req->req.actual += req->dma_size;
1454 
1455 		req->xfer_step = USBF_XFER_START;
1456 		left = req->req.length - req->req.actual;
1457 		if (!left) {
1458 			/* No more data can be added to the buffer */
1459 			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1460 				req->req.actual, req->req.length);
1461 			return 0;
1462 		}
1463 		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u, wait more data\n",
1464 			epn->id, req->req.actual, req->req.length);
1465 		break;
1466 	}
1467 
1468 	return -EINPROGRESS;
1469 }
1470 
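/* Stop an in-progress EPn DMA transfer at both the bridge and EPC levels */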
1471 static void usbf_epn_dma_stop(struct usbf_ep *epn)
1472 {
1473 	usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1, USBF_SYS_EPN_REQEN);
1474 
1475 	/* In the datasheet:
1476 	 *   If EP[m]_REQEN = 0b is set during DMA transfer, AHB-EPC stops DMA
1477 	 *   after 1 packet transfer completed.
1478 	 *   Therefore, wait sufficient time for ensuring DMA transfer
1479 	 *   completion. The WAIT time depends on the system, especially AHB
1480 	 *   bus activity
1481 	 * So an arbitrary 10ms should be sufficient.
1482 	 */
1483 	mdelay(10);
1484 
1485 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_DMA_CTRL, USBF_EPN_DMA_EN);
1486 }
1487 
1488 static void usbf_epn_dma_abort(struct usbf_ep *epn,  struct usbf_req *req)
1489 {
1490 	dev_dbg(epn->udc->dev, "ep%u %s dma abort\n", epn->id,
1491 		epn->is_in ? "in" : "out");
1492 
1493 	epn->bridge_on_dma_end = NULL;
1494 
1495 	usbf_epn_dma_stop(epn);
1496 
1497 	usb_gadget_unmap_request(&epn->udc->gadget, &req->req,
1498 				 epn->is_in ? 1 : 0);
1499 	req->is_mapped = 0;
1500 
1501 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
1502 
1503 	if (epn->is_in) {
1504 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1505 			USBF_EPN_IN_END_EN,
1506 			USBF_EPN_IN_EN);
1507 	} else {
1508 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1509 			USBF_EPN_OUT_END_EN,
1510 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1511 	}
1512 
1513 	/* As DMA is stopped, make sure that no DMA interrupts are pending */
1514 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1515 		USBF_EPN_IN_END_INT | USBF_EPN_OUT_END_INT);
1516 
1517 	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT, USBF_SYS_DMA_ENDINT_EPN(epn->id));
1518 
1519 	/* Enable the DMA interrupt at the bridge level */
1520 	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
1521 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1522 
1523 	/* Reset transfer step */
1524 	req->xfer_step = USBF_XFER_START;
1525 }
1526 
1527 static void usbf_epn_fifo_flush(struct usbf_ep *epn)
1528 {
1529 	u32 ctrl;
1530 	u32 sts;
1531 	int ret;
1532 
1533 	dev_dbg(epn->udc->dev, "ep%u %s fifo flush\n", epn->id,
1534 		epn->is_in ? "in" : "out");
1535 
1536 	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
1537 	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl | USBF_EPN_BCLR);
1538 
1539 	if (ctrl & USBF_EPN_DIR0)
1540 		return;
1541 
1542 	ret = readl_poll_timeout_atomic(epn->regs + USBF_REG_EPN_STATUS, sts,
1543 		(sts & (USBF_EPN_IN_DATA | USBF_EPN_IN_EMPTY)) == USBF_EPN_IN_EMPTY,
1544 		0,  10000);
1545 	if (ret)
1546 		dev_err(epn->udc->dev, "ep%u flush fifo timed out\n", epn->id);
1547 }
1548 
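/* Complete a request: remove it from the queue, abort any in-flight DMA
 * transfer and give the request back to the gadget driver.
 */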
1549 static void usbf_ep_req_done(struct usbf_ep *ep, struct usbf_req *req,
1550 			     int status)
1551 {
1552 	list_del_init(&req->queue);
1553 
1554 	if (status) {
1555 		req->req.status = status;
1556 	} else {
1557 		if (req->req.status == -EINPROGRESS)
1558 			req->req.status = status;
1559 	}
1560 
1561 	dev_dbg(ep->udc->dev, "ep%u %s req done length %u/%u, status=%d\n", ep->id,
1562 		ep->is_in ? "in" : "out",
1563 		req->req.actual, req->req.length, req->req.status);
1564 
1565 	if (req->is_mapped)
1566 		usbf_epn_dma_abort(ep, req);
1567 
1568 	spin_unlock(&ep->udc->lock);
1569 	usb_gadget_giveback_request(&ep->ep, &req->req);
1570 	spin_lock(&ep->udc->lock);
1571 }
1572 
1573 static void usbf_ep_nuke(struct usbf_ep *ep, int status)
1574 {
1575 	struct usbf_req *req;
1576 
1577 	dev_dbg(ep->udc->dev, "ep%u %s nuke status %d\n", ep->id,
1578 		ep->is_in ? "in" : "out",
1579 		status);
1580 
1581 	while (!list_empty(&ep->queue)) {
1582 		req = list_first_entry(&ep->queue, struct usbf_req, queue);
1583 		usbf_ep_req_done(ep, req, status);
1584 	}
1585 
1586 	if (ep->id == 0)
1587 		usbf_ep0_fifo_flush(ep);
1588 	else
1589 		usbf_epn_fifo_flush(ep);
1590 }
1591 
1592 static bool usbf_ep_is_stalled(struct usbf_ep *ep)
1593 {
1594 	u32 ctrl;
1595 
1596 	if (ep->id == 0) {
1597 		ctrl = usbf_ep_reg_readl(ep, USBF_REG_EP0_CONTROL);
1598 		return (ctrl & USBF_EP0_STL) ? true : false;
1599 	}
1600 
1601 	ctrl = usbf_ep_reg_readl(ep, USBF_REG_EPN_CONTROL);
1602 	if (ep->is_in)
1603 		return (ctrl & USBF_EPN_ISTL) ? true : false;
1604 
1605 	return (ctrl & USBF_EPN_OSTL) ? true : false;
1606 }
1607 
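/* Start the first queued request on EPn: kick an IN transfer if the endpoint
 * is idle or, for OUT, clear ONAK and enable interrupts to receive data.
 */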
1608 static int usbf_epn_start_queue(struct usbf_ep *epn)
1609 {
1610 	struct usbf_req *req;
1611 	int ret;
1612 
1613 	if (usbf_ep_is_stalled(epn))
1614 		return 0;
1615 
1616 	req = list_first_entry_or_null(&epn->queue, struct usbf_req, queue);
1617 
1618 	if (epn->is_in) {
1619 		if (req && !epn->is_processing) {
1620 			ret = epn->dma_regs ?
1621 				usbf_epn_dma_in(epn, req) :
1622 				usbf_epn_pio_in(epn, req);
1623 			if (ret != -EINPROGRESS) {
1624 				dev_err(epn->udc->dev,
1625 					"queued next request not in progress\n");
1626 					/* The request cannot be completed (i.e.
1627 					 * ret == 0) on the first call.
1628 					 * Stall and nuke the endpoint.
1629 					 */
1630 				return ret ? ret : -EIO;
1631 			}
1632 		}
1633 	} else {
1634 		if (req) {
1635 			/* Clear ONAK to accept OUT tokens */
1636 			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL,
1637 				USBF_EPN_ONAK);
1638 
1639 			/* Enable interrupts */
1640 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA,
1641 				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1642 		} else {
1643 			/* Disable incoming data and interrupts.
1644 			 * They will be enabled on the next usb_ep_queue call
1645 			 */
1646 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
1647 				USBF_EPN_ONAK);
1648 			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1649 				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1650 		}
1651 	}
1652 	return 0;
1653 }
1654 
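/* Process queued requests on an endpoint using the transfer handler (PIO or
 * DMA) that matches its direction, completing them as they finish.
 */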
1655 static int usbf_ep_process_queue(struct usbf_ep *ep)
1656 {
1657 	int (*usbf_ep_xfer)(struct usbf_ep *ep, struct usbf_req *req);
1658 	struct usbf_req *req;
1659 	int is_processing;
1660 	int ret;
1661 
1662 	if (ep->is_in) {
1663 		usbf_ep_xfer = usbf_ep0_pio_in;
1664 		if (ep->id) {
1665 			usbf_ep_xfer = ep->dma_regs ?
1666 					usbf_epn_dma_in : usbf_epn_pio_in;
1667 		}
1668 	} else {
1669 		usbf_ep_xfer = usbf_ep0_pio_out;
1670 		if (ep->id) {
1671 			usbf_ep_xfer = ep->dma_regs ?
1672 					usbf_epn_dma_out : usbf_epn_pio_out;
1673 		}
1674 	}
1675 
1676 	req = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
1677 	if (!req) {
1678 		dev_err(ep->udc->dev,
1679 			"no request available for ep%u %s process\n", ep->id,
1680 			ep->is_in ? "in" : "out");
1681 		return -ENOENT;
1682 	}
1683 
1684 	do {
1685 		/* We're going to read the FIFO for the current request.
1686 		 * NAK any other incoming data to avoid a race condition if no
1687 		 * more requests are available.
1688 		 */
1689 		if (!ep->is_in && ep->id != 0) {
1690 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1691 				USBF_EPN_ONAK);
1692 		}
1693 
1694 		ret = usbf_ep_xfer(ep, req);
1695 		if (ret == -EINPROGRESS) {
1696 			if (!ep->is_in && ep->id != 0) {
1697 				/* The current request needs more data.
1698 				 * Allow incoming data
1699 				 */
1700 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
1701 					USBF_EPN_ONAK);
1702 			}
1703 			return ret;
1704 		}
1705 
1706 		is_processing = ep->is_processing;
1707 		ep->is_processing = 1;
1708 		usbf_ep_req_done(ep, req, ret);
1709 		ep->is_processing = is_processing;
1710 
1711 		if (ret) {
1712 			/* An error was detected during the request transfer.
1713 			 * Any pending DMA transfers were aborted by the
1714 			 * usbf_ep_req_done() call.
1715 			 * It's time to flush the fifo
1716 			 */
1717 			if (ep->id == 0)
1718 				usbf_ep0_fifo_flush(ep);
1719 			else
1720 				usbf_epn_fifo_flush(ep);
1721 		}
1722 
1723 		req = list_first_entry_or_null(&ep->queue, struct usbf_req,
1724 					       queue);
1725 
1726 		if (ep->is_in)
1727 			continue;
1728 
1729 		if (ep->id != 0) {
1730 			if (req) {
1731 				/* Another request is available.
1732 				 * Allow incoming data
1733 				 */
1734 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
1735 					USBF_EPN_ONAK);
1736 			} else {
1737 				/* No request queued. Disable interrupts.
1738 				 * They will be enabled on usb_ep_queue
1739 				 */
1740 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_INT_ENA,
1741 					USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1742 			}
1743 		}
1744 		/* Do not call usbf_ep_xfer() again */
1745 		return req ? -EINPROGRESS : 0;
1746 
1747 	} while (req);
1748 
1749 	return 0;
1750 }
1751 
1752 static void usbf_ep_stall(struct usbf_ep *ep, bool stall)
1753 {
1754 	struct usbf_req *first;
1755 
1756 	dev_dbg(ep->udc->dev, "ep%u %s %s\n", ep->id,
1757 		ep->is_in ? "in" : "out",
1758 		stall ? "stall" : "unstall");
1759 
1760 	if (ep->id == 0) {
1761 		if (stall)
1762 			usbf_ep_reg_bitset(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
1763 		else
1764 			usbf_ep_reg_bitclr(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
1765 		return;
1766 	}
1767 
1768 	if (stall) {
1769 		if (ep->is_in)
1770 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1771 				USBF_EPN_ISTL);
1772 		else
1773 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1774 				USBF_EPN_OSTL | USBF_EPN_OSTL_EN);
1775 	} else {
1776 		first = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
1777 		if (first && first->is_mapped) {
			/* This can happen if the host halts an endpoint using
1779 			 * SET_FEATURE and then un-halts the endpoint
1780 			 */
1781 			usbf_epn_dma_abort(ep, first);
1782 		}
1783 		usbf_epn_fifo_flush(ep);
1784 		if (ep->is_in) {
1785 			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
1786 				USBF_EPN_ISTL,
1787 				USBF_EPN_IPIDCLR);
1788 		} else {
1789 			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
1790 				USBF_EPN_OSTL,
1791 				USBF_EPN_OSTL_EN | USBF_EPN_OPIDCLR);
1792 		}
1793 		usbf_epn_start_queue(ep);
1794 	}
1795 }
1796 
1797 static void usbf_ep0_enable(struct usbf_ep *ep0)
1798 {
1799 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_INAK_EN | USBF_EP0_BCLR);
1800 
1801 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_INT_ENA,
1802 		USBF_EP0_SETUP_EN | USBF_EP0_STG_START_EN | USBF_EP0_STG_END_EN |
1803 		USBF_EP0_OUT_EN | USBF_EP0_OUT_NULL_EN | USBF_EP0_IN_EN);
1804 
1805 	ep0->udc->ep0state = EP0_IDLE;
1806 	ep0->disabled = 0;
1807 
1808 	/* enable interrupts for the ep0 */
1809 	usbf_reg_bitset(ep0->udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(0));
1810 }
1811 
1812 static int usbf_epn_enable(struct usbf_ep *epn)
1813 {
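	/* Program the endpoint buffer base address and max packet size, then
	 * enable the endpoint with its direction and default NAK/stall state.
	 */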
1814 	u32 base_addr;
1815 	u32 ctrl;
1816 
1817 	base_addr = usbf_ep_info[epn->id].base_addr;
1818 	usbf_ep_reg_writel(epn, USBF_REG_EPN_PCKT_ADRS,
1819 		USBF_EPN_BASEAD(base_addr) | USBF_EPN_MPKT(epn->ep.maxpacket));
1820 
	/* OUT transfer interrupts are enabled during usb_ep_queue */
1822 	if (epn->is_in) {
1823 		/* Will be changed in DMA processing */
1824 		usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_EN);
1825 	}
1826 
	/* Clear the buffer, set the endpoint direction, stall the unused
	 * direction, and enable the endpoint.
	 * Send NAK for OUT data as no requests are queued yet.
	 */
1830 	ctrl = USBF_EPN_EN | USBF_EPN_BCLR;
1831 	if (epn->is_in)
1832 		ctrl |= USBF_EPN_OSTL | USBF_EPN_OSTL_EN;
1833 	else
1834 		ctrl |= USBF_EPN_DIR0 | USBF_EPN_ISTL | USBF_EPN_OSTL_EN | USBF_EPN_ONAK;
1835 	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl);
1836 
1837 	return 0;
1838 }
1839 
1840 static int usbf_ep_enable(struct usb_ep *_ep,
1841 			  const struct usb_endpoint_descriptor *desc)
1842 {
1843 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1844 	struct usbf_udc *udc = ep->udc;
1845 	unsigned long flags;
1846 	int ret;
1847 
1848 	if (ep->id == 0)
1849 		return -EINVAL;
1850 
1851 	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
1852 		return -EINVAL;
1853 
1854 	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
1855 		usb_endpoint_dir_in(desc) ? "in" : "out",
1856 		usb_endpoint_maxp(desc));
1857 
1858 	spin_lock_irqsave(&ep->udc->lock, flags);
1859 	ep->is_in = usb_endpoint_dir_in(desc);
1860 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1861 
1862 	ret = usbf_epn_enable(ep);
1863 	if (ret)
1864 		goto end;
1865 
1866 	ep->disabled = 0;
1867 
1868 	/* enable interrupts for this endpoint */
1869 	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
1870 
1871 	/* enable DMA interrupt at bridge level if DMA is used */
1872 	if (ep->dma_regs) {
1873 		ep->bridge_on_dma_end = NULL;
1874 		usbf_reg_bitset(udc, USBF_REG_AHBBINTEN,
1875 			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
1876 	}
1877 
1878 	ret = 0;
1879 end:
1880 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1881 	return ret;
1882 }
1883 
1884 static int usbf_epn_disable(struct usbf_ep *epn)
1885 {
1886 	/* Disable interrupts */
1887 	usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, 0);
1888 
1889 	/* Disable endpoint */
1890 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_EN);
1891 
1892 	/* remove anything that was pending */
1893 	usbf_ep_nuke(epn, -ESHUTDOWN);
1894 
1895 	return 0;
1896 }
1897 
1898 static int usbf_ep_disable(struct usb_ep *_ep)
1899 {
1900 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1901 	struct usbf_udc *udc = ep->udc;
1902 	unsigned long flags;
1903 	int ret;
1904 
1905 	if (ep->id == 0)
1906 		return -EINVAL;
1907 
1908 	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
1909 		ep->is_in ? "in" : "out", ep->ep.maxpacket);
1910 
1911 	spin_lock_irqsave(&ep->udc->lock, flags);
1912 	ep->disabled = 1;
1913 	/* Disable DMA interrupt */
1914 	if (ep->dma_regs) {
1915 		usbf_reg_bitclr(udc, USBF_REG_AHBBINTEN,
1916 			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
1917 		ep->bridge_on_dma_end = NULL;
1918 	}
1919 	/* disable interrupts for this endpoint */
1920 	usbf_reg_bitclr(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
1921 	/* and the endpoint itself */
1922 	ret = usbf_epn_disable(ep);
1923 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1924 
1925 	return ret;
1926 }
1927 
1928 static int usbf_ep0_queue(struct usbf_ep *ep0, struct usbf_req *req,
1929 			  gfp_t gfp_flags)
1930 {
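	/* Queue a request on ep0. Depending on the current control transfer
	 * phase, the transfer is either started right away or deferred to the
	 * ep0 interrupt handler.
	 */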
1931 	int ret;
1932 
1933 	req->req.actual = 0;
1934 	req->req.status = -EINPROGRESS;
1935 	req->is_zero_sent = 0;
1936 
1937 	list_add_tail(&req->queue, &ep0->queue);
1938 
1939 	if (ep0->udc->ep0state == EP0_IN_STATUS_START_PHASE)
1940 		return 0;
1941 
1942 	if (!ep0->is_in)
1943 		return 0;
1944 
1945 	if (ep0->udc->ep0state == EP0_IN_STATUS_PHASE) {
1946 		if (req->req.length) {
			dev_err(ep0->udc->dev,
				"request length %u for ep0 in status phase\n",
				req->req.length);
1950 			return -EINVAL;
1951 		}
1952 		ep0->delayed_status = 0;
1953 	}
1954 	if (!ep0->is_processing) {
1955 		ret = usbf_ep0_pio_in(ep0, req);
1956 		if (ret != -EINPROGRESS) {
1957 			dev_err(ep0->udc->dev,
1958 				"queued request not in progress\n");
			/* The request cannot be completed (i.e.
			 * ret == 0) on the first call
1961 			 */
1962 			return ret ? ret : -EIO;
1963 		}
1964 	}
1965 
1966 	return 0;
1967 }
1968 
1969 static int usbf_epn_queue(struct usbf_ep *ep, struct usbf_req *req,
1970 			  gfp_t gfp_flags)
1971 {
1972 	int was_empty;
1973 	int ret;
1974 
1975 	if (ep->disabled) {
		dev_err(ep->udc->dev, "ep%u request queued while disabled\n",
1977 			ep->id);
1978 		return -ESHUTDOWN;
1979 	}
1980 
1981 	req->req.actual = 0;
1982 	req->req.status = -EINPROGRESS;
1983 	req->is_zero_sent = 0;
1984 	req->xfer_step = USBF_XFER_START;
1985 
1986 	was_empty = list_empty(&ep->queue);
1987 	list_add_tail(&req->queue, &ep->queue);
1988 	if (was_empty) {
1989 		ret = usbf_epn_start_queue(ep);
1990 		if (ret)
1991 			return ret;
1992 	}
1993 	return 0;
1994 }
1995 
1996 static int usbf_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1997 			 gfp_t gfp_flags)
1998 {
1999 	struct usbf_req *req = container_of(_req, struct usbf_req, req);
2000 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2001 	struct usbf_udc *udc = ep->udc;
2002 	unsigned long flags;
2003 	int ret;
2004 
2005 	if (!_req || !_req->buf)
2006 		return -EINVAL;
2007 
2008 	if (!udc || !udc->driver)
2009 		return -EINVAL;
2010 
2011 	dev_dbg(ep->udc->dev, "ep%u %s req queue length %u, zero %u, short_not_ok %u\n",
2012 		ep->id, ep->is_in ? "in" : "out",
2013 		req->req.length, req->req.zero, req->req.short_not_ok);
2014 
2015 	spin_lock_irqsave(&ep->udc->lock, flags);
2016 	if (ep->id == 0)
2017 		ret = usbf_ep0_queue(ep, req, gfp_flags);
2018 	else
2019 		ret = usbf_epn_queue(ep, req, gfp_flags);
2020 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2021 	return ret;
2022 }
2023 
2024 static int usbf_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
2025 {
2026 	struct usbf_req *req = container_of(_req, struct usbf_req, req);
2027 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2028 	unsigned long flags;
2029 	int is_processing;
2030 	int first;
2031 	int ret;
2032 
2033 	spin_lock_irqsave(&ep->udc->lock, flags);
2034 
2035 	dev_dbg(ep->udc->dev, "ep%u %s req dequeue length %u/%u\n",
2036 		ep->id, ep->is_in ? "in" : "out",
2037 		req->req.actual, req->req.length);
2038 
2039 	first = list_is_first(&req->queue, &ep->queue);
2040 
	/* Complete the request, but prevent any processing that could be
	 * triggered if a new request is queued during the completion callback
2043 	 */
2044 	is_processing = ep->is_processing;
2045 	ep->is_processing = 1;
2046 	usbf_ep_req_done(ep, req, -ECONNRESET);
2047 	ep->is_processing = is_processing;
2048 
2049 	if (first) {
2050 		/* The first item in the list was dequeued.
		 * This item may already have been submitted to the hardware.
		 * So, flush the FIFO.
2053 		 */
2054 		if (ep->id)
2055 			usbf_epn_fifo_flush(ep);
2056 		else
2057 			usbf_ep0_fifo_flush(ep);
2058 	}
2059 
2060 	if (ep->id == 0) {
2061 		/* We dequeue a request on ep0. On this endpoint, we can have
2062 		 * 1 request related to the data stage and/or 1 request
2063 		 * related to the status stage.
		 * Dequeuing one of them leaves the USB control transaction
		 * incoherent. The simplest way to stay consistent after
		 * dequeuing is to stall and nuke the endpoint and wait for
		 * the next SETUP packet.
2068 		 */
2069 		usbf_ep_stall(ep, true);
2070 		usbf_ep_nuke(ep, -ECONNRESET);
2071 		ep->udc->ep0state = EP0_IDLE;
2072 		goto end;
2073 	}
2074 
2075 	if (!first)
2076 		goto end;
2077 
2078 	ret = usbf_epn_start_queue(ep);
2079 	if (ret) {
2080 		usbf_ep_stall(ep, true);
2081 		usbf_ep_nuke(ep, -EIO);
2082 	}
2083 end:
2084 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2085 	return 0;
2086 }
2087 
2088 static struct usb_request *usbf_ep_alloc_request(struct usb_ep *_ep,
2089 						 gfp_t gfp_flags)
2090 {
2091 	struct usbf_req *req;
2092 
2093 	if (!_ep)
2094 		return NULL;
2095 
2096 	req = kzalloc(sizeof(*req), gfp_flags);
2097 	if (!req)
2098 		return NULL;
2099 
2100 	INIT_LIST_HEAD(&req->queue);
2101 
2102 	return &req->req;
2103 }
2104 
2105 static void usbf_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
2106 {
2107 	struct usbf_req *req;
2108 	unsigned long flags;
2109 	struct usbf_ep *ep;
2110 
2111 	if (!_ep || !_req)
2112 		return;
2113 
2114 	req = container_of(_req, struct usbf_req, req);
2115 	ep = container_of(_ep, struct usbf_ep, ep);
2116 
2117 	spin_lock_irqsave(&ep->udc->lock, flags);
2118 	list_del_init(&req->queue);
2119 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2120 	kfree(req);
2121 }
2122 
2123 static int usbf_ep_set_halt(struct usb_ep *_ep, int halt)
2124 {
2125 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2126 	unsigned long flags;
2127 	int ret;
2128 
2129 	if (ep->id == 0)
2130 		return -EINVAL;
2131 
2132 	spin_lock_irqsave(&ep->udc->lock, flags);
2133 
2134 	if (!list_empty(&ep->queue)) {
2135 		ret = -EAGAIN;
2136 		goto end;
2137 	}
2138 
2139 	usbf_ep_stall(ep, halt);
2140 	if (!halt)
2141 		ep->is_wedged = 0;
2142 
2143 	ret = 0;
2144 end:
2145 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2146 
2147 	return ret;
2148 }
2149 
2150 static int usbf_ep_set_wedge(struct usb_ep *_ep)
2151 {
2152 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2153 	unsigned long flags;
2154 	int ret;
2155 
2156 	if (ep->id == 0)
2157 		return -EINVAL;
2158 
2159 	spin_lock_irqsave(&ep->udc->lock, flags);
2160 	if (!list_empty(&ep->queue)) {
2161 		ret = -EAGAIN;
2162 		goto end;
2163 	}
2164 	usbf_ep_stall(ep, 1);
2165 	ep->is_wedged = 1;
2166 
2167 	ret = 0;
2168 end:
2169 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2170 	return ret;
2171 }
2172 
2173 static struct usb_ep_ops usbf_ep_ops = {
2174 	.enable = usbf_ep_enable,
2175 	.disable = usbf_ep_disable,
2176 	.queue = usbf_ep_queue,
2177 	.dequeue = usbf_ep_dequeue,
2178 	.set_halt = usbf_ep_set_halt,
2179 	.set_wedge = usbf_ep_set_wedge,
2180 	.alloc_request = usbf_ep_alloc_request,
2181 	.free_request = usbf_ep_free_request,
2182 };
2183 
2184 static void usbf_ep0_req_complete(struct usb_ep *_ep, struct usb_request *_req)
2185 {
2186 }
2187 
2188 static void usbf_ep0_fill_req(struct usbf_ep *ep0, struct usbf_req *req,
2189 			      void *buf, unsigned int length,
2190 			      void (*complete)(struct usb_ep *_ep,
2191 					       struct usb_request *_req))
2192 {
2193 	if (buf && length)
2194 		memcpy(ep0->udc->ep0_buf, buf, length);
2195 
2196 	req->req.buf = ep0->udc->ep0_buf;
2197 	req->req.length = length;
2198 	req->req.dma = 0;
2199 	req->req.zero = true;
2200 	req->req.complete = complete ? complete : usbf_ep0_req_complete;
2201 	req->req.status = -EINPROGRESS;
2202 	req->req.context = NULL;
2203 	req->req.actual = 0;
2204 }
2205 
2206 static struct usbf_ep *usbf_get_ep_by_addr(struct usbf_udc *udc, u8 address)
2207 {
2208 	struct usbf_ep *ep;
2209 	unsigned int i;
2210 
2211 	if ((address & USB_ENDPOINT_NUMBER_MASK) == 0)
2212 		return &udc->ep[0];
2213 
2214 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
2215 		ep = &udc->ep[i];
2216 
2217 		if (!ep->ep.desc)
2218 			continue;
2219 
2220 		if (ep->ep.desc->bEndpointAddress == address)
2221 			return ep;
2222 	}
2223 
2224 	return NULL;
2225 }
2226 
2227 static int usbf_req_delegate(struct usbf_udc *udc,
2228 			     const struct usb_ctrlrequest *ctrlrequest)
2229 {
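	/* Hand the control request over to the gadget driver's setup()
	 * handler. The lock is released across the call as the handler can
	 * queue requests on ep0, which takes the same lock.
	 */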
2230 	int ret;
2231 
2232 	spin_unlock(&udc->lock);
2233 	ret = udc->driver->setup(&udc->gadget, ctrlrequest);
2234 	spin_lock(&udc->lock);
2235 	if (ret < 0) {
2236 		dev_dbg(udc->dev, "udc driver setup failed %d\n", ret);
2237 		return ret;
2238 	}
2239 	if (ret == USB_GADGET_DELAYED_STATUS) {
2240 		dev_dbg(udc->dev, "delayed status set\n");
2241 		udc->ep[0].delayed_status = 1;
2242 		return 0;
2243 	}
2244 	return ret;
2245 }
2246 
2247 static int usbf_req_get_status(struct usbf_udc *udc,
2248 			       const struct usb_ctrlrequest *ctrlrequest)
2249 {
2250 	struct usbf_ep *ep;
2251 	u16 status_data;
2252 	u16 wLength;
2253 	u16 wValue;
2254 	u16 wIndex;
2255 
2256 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2257 	wLength = le16_to_cpu(ctrlrequest->wLength);
2258 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2259 
2260 	switch (ctrlrequest->bRequestType) {
2261 	case USB_DIR_IN | USB_RECIP_DEVICE | USB_TYPE_STANDARD:
2262 		if ((wValue != 0) || (wIndex != 0) || (wLength != 2))
2263 			goto delegate;
2264 
2265 		status_data = 0;
2266 		if (udc->gadget.is_selfpowered)
2267 			status_data |= BIT(USB_DEVICE_SELF_POWERED);
2268 
2269 		if (udc->is_remote_wakeup)
2270 			status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
2271 
2272 		break;
2273 
2274 	case USB_DIR_IN | USB_RECIP_ENDPOINT | USB_TYPE_STANDARD:
2275 		if ((wValue != 0) || (wLength != 2))
2276 			goto delegate;
2277 
2278 		ep = usbf_get_ep_by_addr(udc, wIndex);
2279 		if (!ep)
2280 			return -EINVAL;
2281 
2282 		status_data = 0;
2283 		if (usbf_ep_is_stalled(ep))
2284 			status_data |= cpu_to_le16(1);
2285 		break;
2286 
2287 	case USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_STANDARD:
2288 		if ((wValue != 0) || (wLength != 2))
2289 			goto delegate;
2290 		status_data = 0;
2291 		break;
2292 
2293 	default:
2294 		goto delegate;
2295 	}
2296 
2297 	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, &status_data,
2298 			  sizeof(status_data), NULL);
2299 	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
2300 
2301 	return 0;
2302 
2303 delegate:
2304 	return usbf_req_delegate(udc, ctrlrequest);
2305 }
2306 
2307 static int usbf_req_clear_set_feature(struct usbf_udc *udc,
2308 				      const struct usb_ctrlrequest *ctrlrequest,
2309 				      bool is_set)
2310 {
2311 	struct usbf_ep *ep;
2312 	u16 wLength;
2313 	u16 wValue;
2314 	u16 wIndex;
2315 
2316 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2317 	wLength = le16_to_cpu(ctrlrequest->wLength);
2318 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2319 
2320 	switch (ctrlrequest->bRequestType) {
2321 	case USB_DIR_OUT | USB_RECIP_DEVICE:
2322 		if ((wIndex != 0) || (wLength != 0))
2323 			goto delegate;
2324 
2325 		if (wValue != cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
2326 			goto delegate;
2327 
2328 		udc->is_remote_wakeup = is_set;
2329 		break;
2330 
2331 	case USB_DIR_OUT | USB_RECIP_ENDPOINT:
2332 		if (wLength != 0)
2333 			goto delegate;
2334 
2335 		ep = usbf_get_ep_by_addr(udc, wIndex);
2336 		if (!ep)
2337 			return -EINVAL;
2338 
2339 		if ((ep->id == 0) && is_set) {
			/* Endpoint 0 cannot be halted (stalled).
			 * Returning an error code leads to a STALL on ep0
			 * but keeps the control state machine consistent.
2343 			 */
2344 			return -EINVAL;
2345 		}
2346 		if (ep->is_wedged && !is_set) {
2347 			/* Ignore CLEAR_FEATURE(HALT ENDPOINT) when the
2348 			 * endpoint is wedged
2349 			 */
2350 			break;
2351 		}
2352 		usbf_ep_stall(ep, is_set);
2353 		break;
2354 
2355 	default:
2356 		goto delegate;
2357 	}
2358 
2359 	return 0;
2360 
2361 delegate:
2362 	return usbf_req_delegate(udc, ctrlrequest);
2363 }
2364 
2365 static void usbf_ep0_req_set_address_complete(struct usb_ep *_ep,
2366 					      struct usb_request *_req)
2367 {
2368 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2369 
2370 	/* The status phase of the SET_ADDRESS request is completed ... */
2371 	if (_req->status == 0) {
		/* ... without any errors -> Signal the new state to the core. */
2373 		usb_gadget_set_state(&ep->udc->gadget, USB_STATE_ADDRESS);
2374 	}
2375 
2376 	/* In case of request failure, there is no need to revert the address
2377 	 * value set to the hardware as the hardware will take care of the
2378 	 * value only if the status stage is completed normally.
2379 	 */
2380 }
2381 
2382 static int usbf_req_set_address(struct usbf_udc *udc,
2383 				const struct usb_ctrlrequest *ctrlrequest)
2384 {
2385 	u16 wLength;
2386 	u16 wValue;
2387 	u16 wIndex;
2388 	u32 addr;
2389 
2390 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2391 	wLength = le16_to_cpu(ctrlrequest->wLength);
2392 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2393 
2394 	if (ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
2395 		goto delegate;
2396 
2397 	if ((wIndex != 0) || (wLength != 0) || (wValue > 127))
2398 		return -EINVAL;
2399 
2400 	addr = wValue;
2401 	/* The hardware will take care of this USB address after the status
2402 	 * stage of the SET_ADDRESS request is completed normally.
2403 	 * It is safe to write it now
2404 	 */
2405 	usbf_reg_writel(udc, USBF_REG_USB_ADDRESS, USBF_USB_SET_USB_ADDR(addr));
2406 
	/* Queue the status request */
2408 	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, NULL, 0,
2409 			  usbf_ep0_req_set_address_complete);
2410 	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
2411 
2412 	return 0;
2413 
2414 delegate:
2415 	return usbf_req_delegate(udc, ctrlrequest);
2416 }
2417 
2418 static int usbf_req_set_configuration(struct usbf_udc *udc,
2419 				      const struct usb_ctrlrequest *ctrlrequest)
2420 {
2421 	u16 wLength;
2422 	u16 wValue;
2423 	u16 wIndex;
2424 	int ret;
2425 
2426 	ret = usbf_req_delegate(udc, ctrlrequest);
2427 	if (ret)
2428 		return ret;
2429 
2430 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2431 	wLength = le16_to_cpu(ctrlrequest->wLength);
2432 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2433 
2434 	if ((ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) ||
2435 	    (wIndex != 0) || (wLength != 0)) {
		/* No error detected by driver->setup() but this is not a USB 2.0
		 * Chapter 9 SET_CONFIGURATION.
2438 		 * Nothing more to do
2439 		 */
2440 		return 0;
2441 	}
2442 
2443 	if (wValue & 0x00FF) {
2444 		usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
2445 	} else {
2446 		usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
2447 		/* Go back to Address State */
2448 		spin_unlock(&udc->lock);
2449 		usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
2450 		spin_lock(&udc->lock);
2451 	}
2452 
2453 	return 0;
2454 }
2455 
2456 static int usbf_handle_ep0_setup(struct usbf_ep *ep0)
2457 {
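	/* Decode the SETUP packet, move the ep0 state machine to the expected
	 * data/status phase and handle the standard requests that need
	 * hardware support here. Other requests are delegated to the gadget
	 * driver.
	 */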
2458 	union {
2459 		struct usb_ctrlrequest ctrlreq;
2460 		u32 raw[2];
2461 	} crq;
2462 	struct usbf_udc *udc = ep0->udc;
2463 	int ret;
2464 
	/* Read setup data (i.e. the USB control request) */
2466 	crq.raw[0] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA0);
2467 	crq.raw[1] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA1);
2468 
2469 	dev_dbg(ep0->udc->dev,
2470 		"ep0 req%02x.%02x, wValue 0x%04x, wIndex 0x%04x, wLength 0x%04x\n",
2471 		crq.ctrlreq.bRequestType, crq.ctrlreq.bRequest,
2472 		crq.ctrlreq.wValue, crq.ctrlreq.wIndex, crq.ctrlreq.wLength);
2473 
2474 	/* Set current EP0 state according to the received request */
2475 	if (crq.ctrlreq.wLength) {
2476 		if (crq.ctrlreq.bRequestType & USB_DIR_IN) {
2477 			udc->ep0state = EP0_IN_DATA_PHASE;
2478 			usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2479 				USBF_EP0_INAK,
2480 				USBF_EP0_INAK_EN);
2481 			ep0->is_in = 1;
2482 		} else {
2483 			udc->ep0state = EP0_OUT_DATA_PHASE;
2484 			usbf_ep_reg_bitclr(ep0, USBF_REG_EP0_CONTROL,
2485 				USBF_EP0_ONAK);
2486 			ep0->is_in = 0;
2487 		}
2488 	} else {
2489 		udc->ep0state = EP0_IN_STATUS_START_PHASE;
2490 		ep0->is_in = 1;
2491 	}
2492 
	/* We start a new control transfer -> Clear the delayed status flag */
2494 	ep0->delayed_status = 0;
2495 
2496 	if ((crq.ctrlreq.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
		/* This is not a USB standard request -> delegate */
2498 		goto delegate;
2499 	}
2500 
2501 	switch (crq.ctrlreq.bRequest) {
2502 	case USB_REQ_GET_STATUS:
2503 		ret = usbf_req_get_status(udc, &crq.ctrlreq);
2504 		break;
2505 
2506 	case USB_REQ_CLEAR_FEATURE:
2507 		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, false);
2508 		break;
2509 
2510 	case USB_REQ_SET_FEATURE:
2511 		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, true);
2512 		break;
2513 
2514 	case USB_REQ_SET_ADDRESS:
2515 		ret = usbf_req_set_address(udc, &crq.ctrlreq);
2516 		break;
2517 
2518 	case USB_REQ_SET_CONFIGURATION:
2519 		ret = usbf_req_set_configuration(udc, &crq.ctrlreq);
2520 		break;
2521 
2522 	default:
2523 		goto delegate;
2524 	}
2525 
2526 	return ret;
2527 
2528 delegate:
2529 	return usbf_req_delegate(udc, &crq.ctrlreq);
2530 }
2531 
2532 static int usbf_handle_ep0_data_status(struct usbf_ep *ep0,
2533 				  const char *ep0state_name,
2534 				  enum usbf_ep0state next_ep0state)
2535 {
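	/* Common handler for the ep0 data and status phases: process the
	 * request queue and switch to the next ep0 state once all requests
	 * are completed.
	 */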
2536 	struct usbf_udc *udc = ep0->udc;
2537 	int ret;
2538 
2539 	ret = usbf_ep_process_queue(ep0);
2540 	switch (ret) {
2541 	case -ENOENT:
2542 		dev_err(udc->dev,
2543 			"no request available for ep0 %s phase\n",
2544 			ep0state_name);
2545 		break;
2546 	case -EINPROGRESS:
2547 		/* More data needs to be processed */
2548 		ret = 0;
2549 		break;
2550 	case 0:
2551 		/* All requests in the queue are processed */
2552 		udc->ep0state = next_ep0state;
2553 		break;
2554 	default:
2555 		dev_err(udc->dev,
2556 			"process queue failed for ep0 %s phase (%d)\n",
2557 			ep0state_name, ret);
2558 		break;
2559 	}
2560 	return ret;
2561 }
2562 
2563 static int usbf_handle_ep0_out_status_start(struct usbf_ep *ep0)
2564 {
2565 	struct usbf_udc *udc = ep0->udc;
2566 	struct usbf_req *req;
2567 
2568 	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2569 				USBF_EP0_ONAK,
2570 				USBF_EP0_PIDCLR);
2571 	ep0->is_in = 0;
2572 
2573 	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2574 	if (!req) {
2575 		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
2576 		usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
2577 	} else {
2578 		if (req->req.length) {
2579 			dev_err(udc->dev,
2580 				"queued request length %u for ep0 out status phase\n",
2581 				req->req.length);
2582 		}
2583 	}
2584 	udc->ep0state = EP0_OUT_STATUS_PHASE;
2585 	return 0;
2586 }
2587 
2588 static int usbf_handle_ep0_in_status_start(struct usbf_ep *ep0)
2589 {
2590 	struct usbf_udc *udc = ep0->udc;
2591 	struct usbf_req *req;
2592 	int ret;
2593 
2594 	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2595 				USBF_EP0_INAK,
2596 				USBF_EP0_INAK_EN | USBF_EP0_PIDCLR);
2597 	ep0->is_in = 1;
2598 
2599 	/* Queue request for status if needed */
2600 	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2601 	if (!req) {
2602 		if (ep0->delayed_status) {
2603 			dev_dbg(ep0->udc->dev,
2604 				"EP0_IN_STATUS_START_PHASE ep0->delayed_status set\n");
2605 			udc->ep0state = EP0_IN_STATUS_PHASE;
2606 			return 0;
2607 		}
2608 
2609 		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL,
2610 			  0, NULL);
2611 		usbf_ep0_queue(ep0, &udc->setup_reply,
2612 			       GFP_ATOMIC);
2613 
2614 		req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2615 	} else {
2616 		if (req->req.length) {
2617 			dev_err(udc->dev,
2618 				"queued request length %u for ep0 in status phase\n",
2619 				req->req.length);
2620 		}
2621 	}
2622 
2623 	ret = usbf_ep0_pio_in(ep0, req);
2624 	if (ret != -EINPROGRESS) {
2625 		usbf_ep_req_done(ep0, req, ret);
2626 		udc->ep0state = EP0_IN_STATUS_END_PHASE;
2627 		return 0;
2628 	}
2629 
2630 	udc->ep0state = EP0_IN_STATUS_PHASE;
2631 	return 0;
2632 }
2633 
2634 static void usbf_ep0_interrupt(struct usbf_ep *ep0)
2635 {
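	/* Run the ep0 state machine. Each loop iteration consumes the status
	 * bits relevant to the current state; the loop stops when neither the
	 * state nor the pending status bits change anymore.
	 */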
2636 	struct usbf_udc *udc = ep0->udc;
2637 	u32 sts, prev_sts;
2638 	int prev_ep0state;
2639 	int ret;
2640 
2641 	ep0->status = usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS);
2642 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_STATUS, ~ep0->status);
2643 
	dev_dbg(ep0->udc->dev, "ep0 status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
2645 		ep0->status,
2646 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_INT_ENA),
2647 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL));
2648 
2649 	sts = ep0->status & (USBF_EP0_SETUP_INT | USBF_EP0_IN_INT | USBF_EP0_OUT_INT |
2650 			     USBF_EP0_OUT_NULL_INT | USBF_EP0_STG_START_INT |
2651 			     USBF_EP0_STG_END_INT);
2652 
2653 	ret = 0;
2654 	do {
2655 		dev_dbg(ep0->udc->dev, "udc->ep0state=%d\n", udc->ep0state);
2656 
2657 		prev_sts = sts;
2658 		prev_ep0state = udc->ep0state;
2659 		switch (udc->ep0state) {
2660 		case EP0_IDLE:
2661 			if (!(sts & USBF_EP0_SETUP_INT))
2662 				break;
2663 
2664 			sts &= ~USBF_EP0_SETUP_INT;
2665 			dev_dbg(ep0->udc->dev, "ep0 handle setup\n");
2666 			ret = usbf_handle_ep0_setup(ep0);
2667 			break;
2668 
2669 		case EP0_IN_DATA_PHASE:
2670 			if (!(sts & USBF_EP0_IN_INT))
2671 				break;
2672 
2673 			sts &= ~USBF_EP0_IN_INT;
2674 			dev_dbg(ep0->udc->dev, "ep0 handle in data phase\n");
2675 			ret = usbf_handle_ep0_data_status(ep0,
2676 				"in data", EP0_OUT_STATUS_START_PHASE);
2677 			break;
2678 
2679 		case EP0_OUT_STATUS_START_PHASE:
2680 			if (!(sts & USBF_EP0_STG_START_INT))
2681 				break;
2682 
2683 			sts &= ~USBF_EP0_STG_START_INT;
2684 			dev_dbg(ep0->udc->dev, "ep0 handle out status start phase\n");
2685 			ret = usbf_handle_ep0_out_status_start(ep0);
2686 			break;
2687 
2688 		case EP0_OUT_STATUS_PHASE:
2689 			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
2690 				break;
2691 
2692 			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
2693 			dev_dbg(ep0->udc->dev, "ep0 handle out status phase\n");
2694 			ret = usbf_handle_ep0_data_status(ep0,
2695 				"out status",
2696 				EP0_OUT_STATUS_END_PHASE);
2697 			break;
2698 
2699 		case EP0_OUT_STATUS_END_PHASE:
2700 			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
2701 				break;
2702 
2703 			sts &= ~USBF_EP0_STG_END_INT;
2704 			dev_dbg(ep0->udc->dev, "ep0 handle out status end phase\n");
2705 			udc->ep0state = EP0_IDLE;
2706 			break;
2707 
2708 		case EP0_OUT_DATA_PHASE:
2709 			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
2710 				break;
2711 
2712 			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
2713 			dev_dbg(ep0->udc->dev, "ep0 handle out data phase\n");
2714 			ret = usbf_handle_ep0_data_status(ep0,
2715 				"out data", EP0_IN_STATUS_START_PHASE);
2716 			break;
2717 
2718 		case EP0_IN_STATUS_START_PHASE:
2719 			if (!(sts & USBF_EP0_STG_START_INT))
2720 				break;
2721 
2722 			sts &= ~USBF_EP0_STG_START_INT;
2723 			dev_dbg(ep0->udc->dev, "ep0 handle in status start phase\n");
2724 			ret = usbf_handle_ep0_in_status_start(ep0);
2725 			break;
2726 
2727 		case EP0_IN_STATUS_PHASE:
2728 			if (!(sts & USBF_EP0_IN_INT))
2729 				break;
2730 
2731 			sts &= ~USBF_EP0_IN_INT;
2732 			dev_dbg(ep0->udc->dev, "ep0 handle in status phase\n");
2733 			ret = usbf_handle_ep0_data_status(ep0,
2734 				"in status", EP0_IN_STATUS_END_PHASE);
2735 			break;
2736 
2737 		case EP0_IN_STATUS_END_PHASE:
2738 			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
2739 				break;
2740 
2741 			sts &= ~USBF_EP0_STG_END_INT;
2742 			dev_dbg(ep0->udc->dev, "ep0 handle in status end\n");
2743 			udc->ep0state = EP0_IDLE;
2744 			break;
2745 
2746 		default:
2747 			udc->ep0state = EP0_IDLE;
2748 			break;
2749 		}
2750 
2751 		if (ret) {
2752 			dev_dbg(ep0->udc->dev, "ep0 failed (%d)\n", ret);
2753 			/* Failure -> stall.
2754 			 * This stall state will be automatically cleared when
2755 			 * the IP receives the next SETUP packet
2756 			 */
2757 			usbf_ep_stall(ep0, true);
2758 
2759 			/* Remove anything that was pending */
2760 			usbf_ep_nuke(ep0, -EPROTO);
2761 
2762 			udc->ep0state = EP0_IDLE;
2763 			break;
2764 		}
2765 
2766 	} while ((prev_ep0state != udc->ep0state) || (prev_sts != sts));
2767 
2768 	dev_dbg(ep0->udc->dev, "ep0 done udc->ep0state=%d, status=0x%08x. next=0x%08x\n",
2769 		udc->ep0state, sts,
2770 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS));
2771 }
2772 
2773 static void usbf_epn_process_queue(struct usbf_ep *epn)
2774 {
2775 	int ret;
2776 
2777 	ret = usbf_ep_process_queue(epn);
2778 	switch (ret) {
2779 	case -ENOENT:
2780 		dev_warn(epn->udc->dev, "ep%u %s, no request available\n",
2781 			epn->id, epn->is_in ? "in" : "out");
2782 		break;
2783 	case -EINPROGRESS:
2784 		/* More data needs to be processed */
2785 		ret = 0;
2786 		break;
2787 	case 0:
2788 		/* All requests in the queue are processed */
2789 		break;
2790 	default:
2791 		dev_err(epn->udc->dev, "ep%u %s, process queue failed (%d)\n",
2792 			epn->id, epn->is_in ? "in" : "out", ret);
2793 		break;
2794 	}
2795 
2796 	if (ret) {
2797 		dev_dbg(epn->udc->dev, "ep%u %s failed (%d)\n", epn->id,
2798 			epn->is_in ? "in" : "out", ret);
2799 		usbf_ep_stall(epn, true);
2800 		usbf_ep_nuke(epn, ret);
2801 	}
2802 }
2803 
2804 static void usbf_epn_interrupt(struct usbf_ep *epn)
2805 {
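	/* Acknowledge only the enabled interrupt bits and process the request
	 * queue on IN and/or OUT transfer completions.
	 */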
2806 	u32 sts;
2807 	u32 ena;
2808 
2809 	epn->status = usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS);
2810 	ena = usbf_ep_reg_readl(epn, USBF_REG_EPN_INT_ENA);
2811 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(epn->status & ena));
2812 
	dev_dbg(epn->udc->dev, "ep%u %s status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
2814 		epn->id, epn->is_in ? "in" : "out", epn->status, ena,
2815 		usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL));
2816 
2817 	if (epn->disabled) {
2818 		dev_warn(epn->udc->dev, "ep%u %s, interrupt while disabled\n",
2819 			epn->id, epn->is_in ? "in" : "out");
2820 		return;
2821 	}
2822 
2823 	sts = epn->status & ena;
2824 
2825 	if (sts & (USBF_EPN_IN_END_INT | USBF_EPN_IN_INT)) {
2826 		sts &= ~(USBF_EPN_IN_END_INT | USBF_EPN_IN_INT);
2827 		dev_dbg(epn->udc->dev, "ep%u %s process queue (in interrupts)\n",
2828 			epn->id, epn->is_in ? "in" : "out");
2829 		usbf_epn_process_queue(epn);
2830 	}
2831 
2832 	if (sts & (USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT)) {
2833 		sts &= ~(USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
2834 		dev_dbg(epn->udc->dev, "ep%u %s process queue (out interrupts)\n",
2835 			epn->id, epn->is_in ? "in" : "out");
2836 		usbf_epn_process_queue(epn);
2837 	}
2838 
2839 	dev_dbg(epn->udc->dev, "ep%u %s done status=0x%08x. next=0x%08x\n",
2840 		epn->id, epn->is_in ? "in" : "out",
2841 		sts, usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS));
2842 }
2843 
2844 static void usbf_ep_reset(struct usbf_ep *ep)
2845 {
2846 	ep->status = 0;
2847 	/* Remove anything that was pending */
2848 	usbf_ep_nuke(ep, -ESHUTDOWN);
2849 }
2850 
2851 static void usbf_reset(struct usbf_udc *udc)
2852 {
2853 	int i;
2854 
2855 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
2856 		if (udc->ep[i].disabled)
2857 			continue;
2858 
2859 		usbf_ep_reset(&udc->ep[i]);
2860 	}
2861 
2862 	if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
2863 		udc->gadget.speed = USB_SPEED_HIGH;
2864 	else
2865 		udc->gadget.speed = USB_SPEED_FULL;
2866 
2867 	/* Remote wakeup feature must be disabled on USB bus reset */
2868 	udc->is_remote_wakeup = false;
2869 
2870 	/* Enable endpoint zero */
2871 	usbf_ep0_enable(&udc->ep[0]);
2872 
2873 	if (udc->driver) {
2874 		/* Signal the reset */
2875 		spin_unlock(&udc->lock);
2876 		usb_gadget_udc_reset(&udc->gadget, udc->driver);
2877 		spin_lock(&udc->lock);
2878 	}
2879 }
2880 
2881 static void usbf_driver_suspend(struct usbf_udc *udc)
2882 {
2883 	if (udc->is_usb_suspended) {
2884 		dev_dbg(udc->dev, "already suspended\n");
2885 		return;
2886 	}
2887 
2888 	dev_dbg(udc->dev, "do usb suspend\n");
2889 	udc->is_usb_suspended = true;
2890 
2891 	if (udc->driver && udc->driver->suspend) {
2892 		spin_unlock(&udc->lock);
2893 		udc->driver->suspend(&udc->gadget);
2894 		spin_lock(&udc->lock);
2895 
		/* The datasheet says to set the USB_CONTROL register SUSPEND
		 * bit when a USB bus suspend is detected.
		 * This bit stops the clocks (EPC, SIE, USBPHY) but these
		 * clocks do not seem to be used only by the USB device: some
		 * UARTs can be lost ...
2901 		 * So, do not set the USB_CONTROL register SUSPEND bit.
2902 		 */
2903 	}
2904 }
2905 
2906 static void usbf_driver_resume(struct usbf_udc *udc)
2907 {
2908 	if (!udc->is_usb_suspended)
2909 		return;
2910 
2911 	dev_dbg(udc->dev, "do usb resume\n");
2912 	udc->is_usb_suspended = false;
2913 
2914 	if (udc->driver && udc->driver->resume) {
2915 		spin_unlock(&udc->lock);
2916 		udc->driver->resume(&udc->gadget);
2917 		spin_lock(&udc->lock);
2918 	}
2919 }
2920 
2921 static irqreturn_t usbf_epc_irq(int irq, void *_udc)
2922 {
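	/* EPC interrupt handler: handle bus events (resume, bus reset, speed
	 * change, suspend) and dispatch per-endpoint interrupts.
	 */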
2923 	struct usbf_udc *udc = (struct usbf_udc *)_udc;
2924 	unsigned long flags;
2925 	struct usbf_ep *ep;
2926 	u32 int_sts;
2927 	u32 int_en;
2928 	int i;
2929 
2930 	spin_lock_irqsave(&udc->lock, flags);
2931 
2932 	int_en = usbf_reg_readl(udc, USBF_REG_USB_INT_ENA);
2933 	int_sts = usbf_reg_readl(udc, USBF_REG_USB_INT_STA) & int_en;
2934 	usbf_reg_writel(udc, USBF_REG_USB_INT_STA, ~int_sts);
2935 
2936 	dev_dbg(udc->dev, "int_sts=0x%08x\n", int_sts);
2937 
2938 	if (int_sts & USBF_USB_RSUM_INT) {
2939 		dev_dbg(udc->dev, "handle resume\n");
2940 		usbf_driver_resume(udc);
2941 	}
2942 
2943 	if (int_sts & USBF_USB_USB_RST_INT) {
2944 		dev_dbg(udc->dev, "handle bus reset\n");
2945 		usbf_driver_resume(udc);
2946 		usbf_reset(udc);
2947 	}
2948 
2949 	if (int_sts & USBF_USB_SPEED_MODE_INT) {
2950 		if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
2951 			udc->gadget.speed = USB_SPEED_HIGH;
2952 		else
2953 			udc->gadget.speed = USB_SPEED_FULL;
2954 		dev_dbg(udc->dev, "handle speed change (%s)\n",
2955 			udc->gadget.speed == USB_SPEED_HIGH ? "High" : "Full");
2956 	}
2957 
2958 	if (int_sts & USBF_USB_EPN_INT(0)) {
2959 		usbf_driver_resume(udc);
2960 		usbf_ep0_interrupt(&udc->ep[0]);
2961 	}
2962 
2963 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
2964 		ep = &udc->ep[i];
2965 
2966 		if (int_sts & USBF_USB_EPN_INT(i)) {
2967 			usbf_driver_resume(udc);
2968 			usbf_epn_interrupt(ep);
2969 		}
2970 	}
2971 
2972 	if (int_sts & USBF_USB_SPND_INT) {
2973 		dev_dbg(udc->dev, "handle suspend\n");
2974 		usbf_driver_suspend(udc);
2975 	}
2976 
2977 	spin_unlock_irqrestore(&udc->lock, flags);
2978 
2979 	return IRQ_HANDLED;
2980 }
2981 
2982 static irqreturn_t usbf_ahb_epc_irq(int irq, void *_udc)
2983 {
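	/* AHB-EPC bridge interrupt handler: VBUS level changes and
	 * per-endpoint DMA end-of-transfer notifications.
	 */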
2984 	struct usbf_udc *udc = (struct usbf_udc *)_udc;
2985 	unsigned long flags;
2986 	struct usbf_ep *epn;
2987 	u32 sysbint;
2988 	void (*ep_action)(struct usbf_ep *epn);
2989 	int i;
2990 
2991 	spin_lock_irqsave(&udc->lock, flags);
2992 
2993 	/* Read and ack interrupts */
2994 	sysbint = usbf_reg_readl(udc, USBF_REG_AHBBINT);
2995 	usbf_reg_writel(udc, USBF_REG_AHBBINT, sysbint);
2996 
2997 	if ((sysbint & USBF_SYS_VBUS_INT) == USBF_SYS_VBUS_INT) {
2998 		if (usbf_reg_readl(udc, USBF_REG_EPCTR) & USBF_SYS_VBUS_LEVEL) {
2999 			dev_dbg(udc->dev, "handle vbus (1)\n");
3000 			spin_unlock(&udc->lock);
3001 			usb_udc_vbus_handler(&udc->gadget, true);
3002 			usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
3003 			spin_lock(&udc->lock);
3004 		} else {
3005 			dev_dbg(udc->dev, "handle vbus (0)\n");
3006 			udc->is_usb_suspended = false;
3007 			spin_unlock(&udc->lock);
3008 			usb_udc_vbus_handler(&udc->gadget, false);
3009 			usb_gadget_set_state(&udc->gadget,
3010 					     USB_STATE_NOTATTACHED);
3011 			spin_lock(&udc->lock);
3012 		}
3013 	}
3014 
3015 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
3016 		if (sysbint & USBF_SYS_DMA_ENDINT_EPN(i)) {
3017 			epn = &udc->ep[i];
3018 			dev_dbg(epn->udc->dev,
3019 				"ep%u handle DMA complete. action=%ps\n",
3020 				epn->id, epn->bridge_on_dma_end);
3021 			ep_action = epn->bridge_on_dma_end;
3022 			if (ep_action) {
3023 				epn->bridge_on_dma_end = NULL;
3024 				ep_action(epn);
3025 			}
3026 		}
3027 	}
3028 
3029 	spin_unlock_irqrestore(&udc->lock, flags);
3030 
3031 	return IRQ_HANDLED;
3032 }
3033 
3034 static int usbf_udc_start(struct usb_gadget *gadget,
3035 			  struct usb_gadget_driver *driver)
3036 {
3037 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3038 	unsigned long flags;
3039 
3040 	dev_info(udc->dev, "start (driver '%s')\n", driver->driver.name);
3041 
3042 	spin_lock_irqsave(&udc->lock, flags);
3043 
3044 	/* hook up the driver */
3045 	udc->driver = driver;
3046 
3047 	/* Enable VBUS interrupt */
3048 	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, USBF_SYS_VBUS_INTEN);
3049 
3050 	spin_unlock_irqrestore(&udc->lock, flags);
3051 
3052 	return 0;
3053 }
3054 
3055 static int usbf_udc_stop(struct usb_gadget *gadget)
3056 {
3057 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3058 	unsigned long flags;
3059 
3060 	spin_lock_irqsave(&udc->lock, flags);
3061 
3062 	/* Disable VBUS interrupt */
3063 	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, 0);
3064 
3065 	udc->driver = NULL;
3066 
3067 	spin_unlock_irqrestore(&udc->lock, flags);
3068 
3069 	dev_info(udc->dev, "stopped\n");
3070 
3071 	return 0;
3072 }
3073 
3074 static int usbf_get_frame(struct usb_gadget *gadget)
3075 {
3076 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3077 
3078 	return USBF_USB_GET_FRAME(usbf_reg_readl(udc, USBF_REG_USB_ADDRESS));
3079 }
3080 
3081 static void usbf_attach(struct usbf_udc *udc)
3082 {
3083 	/* Enable USB signal to Function PHY
3084 	 * D+ signal Pull-up
	 * Disable endpoint 0, it will be automatically enabled when a USB reset
3086 	 * is received.
3087 	 * Disable the other endpoints
3088 	 */
3089 	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
3090 		USBF_USB_CONNECTB | USBF_USB_DEFAULT | USBF_USB_CONF,
3091 		USBF_USB_PUE2);
3092 
3093 	/* Enable reset and mode change interrupts */
3094 	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA,
3095 		USBF_USB_USB_RST_EN | USBF_USB_SPEED_MODE_EN | USBF_USB_RSUM_EN | USBF_USB_SPND_EN);
3096 }
3097 
3098 static void usbf_detach(struct usbf_udc *udc)
3099 {
3100 	int i;
3101 
3102 	/* Disable interrupts */
3103 	usbf_reg_writel(udc, USBF_REG_USB_INT_ENA, 0);
3104 
3105 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
3106 		if (udc->ep[i].disabled)
3107 			continue;
3108 
3109 		usbf_ep_reset(&udc->ep[i]);
3110 	}
3111 
3112 	/* Disable USB signal to Function PHY
	 * Do not pull up the D+ signal
3114 	 * Disable endpoint 0
3115 	 * Disable the other endpoints
3116 	 */
3117 	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
3118 		USBF_USB_PUE2 | USBF_USB_DEFAULT | USBF_USB_CONF,
3119 		USBF_USB_CONNECTB);
3120 }
3121 
3122 static int usbf_pullup(struct usb_gadget *gadget, int is_on)
3123 {
3124 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3125 	unsigned long flags;
3126 
3127 	dev_dbg(udc->dev, "pullup %d\n", is_on);
3128 
3129 	spin_lock_irqsave(&udc->lock, flags);
3130 	if (is_on)
3131 		usbf_attach(udc);
3132 	else
3133 		usbf_detach(udc);
3134 	spin_unlock_irqrestore(&udc->lock, flags);
3135 
3136 	return 0;
3137 }
3138 
3139 static int usbf_udc_set_selfpowered(struct usb_gadget *gadget,
3140 				    int is_selfpowered)
3141 {
3142 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3143 	unsigned long flags;
3144 
3145 	spin_lock_irqsave(&udc->lock, flags);
3146 	gadget->is_selfpowered = (is_selfpowered != 0);
3147 	spin_unlock_irqrestore(&udc->lock, flags);
3148 
3149 	return 0;
3150 }
3151 
3152 static int usbf_udc_wakeup(struct usb_gadget *gadget)
3153 {
3154 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3155 	unsigned long flags;
3156 	int ret;
3157 
3158 	spin_lock_irqsave(&udc->lock, flags);
3159 
3160 	if (!udc->is_remote_wakeup) {
3161 		dev_dbg(udc->dev, "remote wakeup not allowed\n");
3162 		ret = -EINVAL;
3163 		goto end;
3164 	}
3165 
3166 	dev_dbg(udc->dev, "do wakeup\n");
3167 
3168 	/* Send the resume signal */
3169 	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
3170 	usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
3171 
3172 	ret = 0;
3173 end:
3174 	spin_unlock_irqrestore(&udc->lock, flags);
3175 	return ret;
3176 }
3177 
3178 static struct usb_gadget_ops usbf_gadget_ops = {
3179 	.get_frame = usbf_get_frame,
3180 	.pullup = usbf_pullup,
3181 	.udc_start = usbf_udc_start,
3182 	.udc_stop = usbf_udc_stop,
3183 	.set_selfpowered = usbf_udc_set_selfpowered,
3184 	.wakeup = usbf_udc_wakeup,
3185 };
3186 
3187 static int usbf_epn_check(struct usbf_ep *epn)
3188 {
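	/* The endpoint type and buffering (single or double) are fixed by the
	 * hardware configuration. Check that they match the usbf_ep_info[]
	 * description used by the driver.
	 */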
3189 	const char *type_txt;
3190 	const char *buf_txt;
3191 	int ret = 0;
3192 	u32 ctrl;
3193 
3194 	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
3195 
3196 	switch (ctrl & USBF_EPN_MODE_MASK) {
3197 	case USBF_EPN_MODE_BULK:
3198 		type_txt = "bulk";
3199 		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
3200 		    !epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
3201 			dev_err(epn->udc->dev,
3202 				"ep%u caps mismatch, bulk expected\n", epn->id);
3203 			ret = -EINVAL;
3204 		}
3205 		break;
3206 	case USBF_EPN_MODE_INTR:
3207 		type_txt = "intr";
3208 		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
3209 		    epn->ep.caps.type_bulk || !epn->ep.caps.type_int) {
3210 			dev_err(epn->udc->dev,
3211 				"ep%u caps mismatch, int expected\n", epn->id);
3212 			ret = -EINVAL;
3213 		}
3214 		break;
3215 	case USBF_EPN_MODE_ISO:
3216 		type_txt = "iso";
3217 		if (epn->ep.caps.type_control || !epn->ep.caps.type_iso ||
3218 		    epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
3219 			dev_err(epn->udc->dev,
3220 				"ep%u caps mismatch, iso expected\n", epn->id);
3221 			ret = -EINVAL;
3222 		}
3223 		break;
3224 	default:
3225 		type_txt = "unknown";
3226 		dev_err(epn->udc->dev, "ep%u unknown type\n", epn->id);
3227 		ret = -EINVAL;
3228 		break;
3229 	}
3230 
3231 	if (ctrl & USBF_EPN_BUF_TYPE_DOUBLE) {
3232 		buf_txt = "double";
3233 		if (!usbf_ep_info[epn->id].is_double) {
3234 			dev_err(epn->udc->dev,
3235 				"ep%u buffer mismatch, double expected\n",
3236 				epn->id);
3237 			ret = -EINVAL;
3238 		}
3239 	} else {
3240 		buf_txt = "single";
3241 		if (usbf_ep_info[epn->id].is_double) {
3242 			dev_err(epn->udc->dev,
3243 				"ep%u buffer mismatch, single expected\n",
3244 				epn->id);
3245 			ret = -EINVAL;
3246 		}
3247 	}
3248 
3249 	dev_dbg(epn->udc->dev, "ep%u (%s) %s, %s buffer %u, checked %s\n",
3250 		 epn->id, epn->ep.name, type_txt, buf_txt,
3251 		 epn->ep.maxpacket_limit, ret ? "failed" : "ok");
3252 
3253 	return ret;
3254 }
3255 
3256 static int usbf_probe(struct platform_device *pdev)
3257 {
3258 	struct device *dev = &pdev->dev;
3259 	struct usbf_udc *udc;
3260 	struct usbf_ep *ep;
3261 	unsigned int i;
3262 	int irq;
3263 	int ret;
3264 
3265 	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
3266 	if (!udc)
3267 		return -ENOMEM;
3268 	platform_set_drvdata(pdev, udc);
3269 
3270 	udc->dev = dev;
3271 	spin_lock_init(&udc->lock);
3272 
3273 	udc->regs = devm_platform_ioremap_resource(pdev, 0);
3274 	if (IS_ERR(udc->regs))
3275 		return PTR_ERR(udc->regs);
3276 
3277 	devm_pm_runtime_enable(&pdev->dev);
3278 	ret = pm_runtime_resume_and_get(&pdev->dev);
3279 	if (ret < 0)
3280 		return ret;
3281 
3282 	dev_info(dev, "USBF version: %08x\n",
3283 		usbf_reg_readl(udc, USBF_REG_USBSSVER));
3284 
3285 	/* Resetting the PLL is handled via the clock driver as it has common
3286 	 * registers with USB Host
3287 	 */
3288 	usbf_reg_bitclr(udc, USBF_REG_EPCTR, USBF_SYS_EPC_RST);
3289 
	/* Default speed; updated at run time (bus reset, speed mode change) */
3291 	udc->gadget.speed = USB_SPEED_FULL;
3292 	udc->gadget.max_speed = USB_SPEED_HIGH;
3293 	udc->gadget.ops = &usbf_gadget_ops;
3294 
3295 	udc->gadget.name = dev->driver->name;
3296 	udc->gadget.dev.parent = dev;
3297 	udc->gadget.ep0 = &udc->ep[0].ep;
3298 
	/* The hardware DMA controller needs DMA addresses aligned on 32 bits.
	 * A fallback to PIO is done if a DMA address is not aligned.
3301 	 */
3302 	udc->gadget.quirk_avoids_skb_reserve = 1;
3303 
3304 	INIT_LIST_HEAD(&udc->gadget.ep_list);
	/* We have a canned request structure to allow sending packets as
	 * replies to GET_STATUS requests
3307 	 */
3308 	INIT_LIST_HEAD(&udc->setup_reply.queue);
3309 
3310 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
3311 		ep = &udc->ep[i];
3312 
3313 		if (!(usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
3314 		      USBF_SYS_EP_AVAILABLE(i))) {
3315 			continue;
3316 		}
3317 
3318 		INIT_LIST_HEAD(&ep->queue);
3319 
3320 		ep->id = i;
3321 		ep->disabled = 1;
3322 		ep->udc = udc;
3323 		ep->ep.ops = &usbf_ep_ops;
3324 		ep->ep.name = usbf_ep_info[i].name;
3325 		ep->ep.caps = usbf_ep_info[i].caps;
3326 		usb_ep_set_maxpacket_limit(&ep->ep,
3327 					   usbf_ep_info[i].maxpacket_limit);
3328 
3329 		if (ep->id == 0) {
3330 			ep->regs = ep->udc->regs + USBF_BASE_EP0;
3331 		} else {
3332 			ep->regs = ep->udc->regs + USBF_BASE_EPN(ep->id - 1);
3333 			ret = usbf_epn_check(ep);
3334 			if (ret)
3335 				return ret;
3336 			if (usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
3337 			    USBF_SYS_DMA_AVAILABLE(i)) {
3338 				ep->dma_regs = ep->udc->regs +
3339 					       USBF_BASE_DMA_EPN(ep->id - 1);
3340 			}
3341 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
3342 		}
3343 	}
3344 
3345 	irq = platform_get_irq(pdev, 0);
3346 	if (irq < 0)
3347 		return irq;
3348 	ret = devm_request_irq(dev, irq, usbf_epc_irq, 0, "usbf-epc", udc);
3349 	if (ret) {
3350 		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
3351 		return ret;
3352 	}
3353 
3354 	irq = platform_get_irq(pdev, 1);
3355 	if (irq < 0)
3356 		return irq;
3357 	ret = devm_request_irq(dev, irq, usbf_ahb_epc_irq, 0, "usbf-ahb-epc", udc);
3358 	if (ret) {
3359 		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
3360 		return ret;
3361 	}
3362 
3363 	usbf_reg_bitset(udc, USBF_REG_AHBMCTR, USBF_SYS_WBURST_TYPE);
3364 
3365 	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL,
3366 		USBF_USB_INT_SEL | USBF_USB_SOF_RCV | USBF_USB_SOF_CLK_MODE);
3367 
3368 	ret = usb_add_gadget_udc(dev, &udc->gadget);
3369 	if (ret)
3370 		return ret;
3371 
3372 	return 0;
3373 }
3374 
3375 static int usbf_remove(struct platform_device *pdev)
3376 {
3377 	struct usbf_udc *udc = platform_get_drvdata(pdev);
3378 
3379 	usb_del_gadget_udc(&udc->gadget);
3380 
3381 	pm_runtime_put(&pdev->dev);
3382 
3383 	return 0;
3384 }
3385 
3386 static const struct of_device_id usbf_match[] = {
3387 	{ .compatible = "renesas,rzn1-usbf" },
3388 	{} /* sentinel */
3389 };
3390 MODULE_DEVICE_TABLE(of, usbf_match);
3391 
3392 static struct platform_driver udc_driver = {
3393 	.driver = {
3394 		.name = "usbf_renesas",
3396 		.of_match_table = usbf_match,
3397 	},
3398 	.probe          = usbf_probe,
3399 	.remove         = usbf_remove,
3400 };
3401 
3402 module_platform_driver(udc_driver);
3403 
3404 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
3405 MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 USB Function driver");
3406 MODULE_LICENSE("GPL");
3407