1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Renesas USBF USB Function driver
4  *
5  * Copyright 2022 Schneider Electric
6  * Author: Herve Codina <herve.codina@bootlin.com>
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/iopoll.h>
13 #include <linux/kernel.h>
14 #include <linux/kfifo.h>
15 #include <linux/mod_devicetable.h>
16 #include <linux/module.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/types.h>
20 #include <linux/usb/composite.h>
21 #include <linux/usb/gadget.h>
22 #include <linux/usb/role.h>
23 
24 #define USBF_NUM_ENDPOINTS	16
25 #define USBF_EP0_MAX_PCKT_SIZE	64
26 
27 /* EPC registers */
28 #define USBF_REG_USB_CONTROL	0x000
29 #define     USBF_USB_PUE2		BIT(2)
30 #define     USBF_USB_CONNECTB		BIT(3)
31 #define     USBF_USB_DEFAULT		BIT(4)
32 #define     USBF_USB_CONF		BIT(5)
33 #define     USBF_USB_SUSPEND		BIT(6)
34 #define     USBF_USB_RSUM_IN		BIT(7)
35 #define     USBF_USB_SOF_RCV		BIT(8)
36 #define     USBF_USB_FORCEFS		BIT(9)
37 #define     USBF_USB_INT_SEL		BIT(10)
38 #define     USBF_USB_SOF_CLK_MODE	BIT(11)
39 
40 #define USBF_REG_USB_STATUS	0x004
41 #define     USBF_USB_RSUM_OUT		BIT(1)
42 #define     USBF_USB_SPND_OUT		BIT(2)
43 #define     USBF_USB_USB_RST		BIT(3)
44 #define     USBF_USB_DEFAULT_ST		BIT(4)
45 #define     USBF_USB_CONF_ST		BIT(5)
46 #define     USBF_USB_SPEED_MODE		BIT(6)
47 #define     USBF_USB_SOF_DELAY_STATUS	BIT(31)
48 
49 #define USBF_REG_USB_ADDRESS	0x008
50 #define     USBF_USB_SOF_STATUS		BIT(15)
51 #define     USBF_USB_SET_USB_ADDR(_a)	((_a) << 16)
52 #define     USBF_USB_GET_FRAME(_r)	((_r) & 0x7FF)
53 
54 #define USBF_REG_SETUP_DATA0	0x018
55 #define USBF_REG_SETUP_DATA1	0x01C
56 #define USBF_REG_USB_INT_STA	0x020
57 #define     USBF_USB_RSUM_INT		BIT(1)
58 #define     USBF_USB_SPND_INT		BIT(2)
59 #define     USBF_USB_USB_RST_INT	BIT(3)
60 #define     USBF_USB_SOF_INT		BIT(4)
61 #define     USBF_USB_SOF_ERROR_INT	BIT(5)
62 #define     USBF_USB_SPEED_MODE_INT	BIT(6)
63 #define     USBF_USB_EPN_INT(_n)	(BIT(8) << (_n)) /* n=0..15 */
64 
65 #define USBF_REG_USB_INT_ENA	0x024
66 #define     USBF_USB_RSUM_EN		BIT(1)
67 #define     USBF_USB_SPND_EN		BIT(2)
68 #define     USBF_USB_USB_RST_EN		BIT(3)
69 #define     USBF_USB_SOF_EN		BIT(4)
70 #define     USBF_USB_SOF_ERROR_EN	BIT(5)
71 #define     USBF_USB_SPEED_MODE_EN	BIT(6)
72 #define     USBF_USB_EPN_EN(_n)		(BIT(8) << (_n)) /* n=0..15 */
73 
74 #define USBF_BASE_EP0		0x028
75 /* EP0 register offsets from Base + USBF_BASE_EP0 (EP0 regs area) */
76 #define     USBF_REG_EP0_CONTROL	0x00
77 #define         USBF_EP0_ONAK			BIT(0)
78 #define         USBF_EP0_INAK			BIT(1)
79 #define         USBF_EP0_STL			BIT(2)
80 #define         USBF_EP0_PERR_NAK_CLR		BIT(3)
81 #define         USBF_EP0_INAK_EN		BIT(4)
82 #define         USBF_EP0_DW_MASK		(0x3 << 5)
83 #define         USBF_EP0_DW(_s)			((_s) << 5)
84 #define         USBF_EP0_DEND			BIT(7)
85 #define         USBF_EP0_BCLR			BIT(8)
86 #define         USBF_EP0_PIDCLR			BIT(9)
87 #define         USBF_EP0_AUTO			BIT(16)
88 #define         USBF_EP0_OVERSEL		BIT(17)
89 #define         USBF_EP0_STGSEL			BIT(18)
90 
91 #define     USBF_REG_EP0_STATUS		0x04
92 #define         USBF_EP0_SETUP_INT		BIT(0)
93 #define         USBF_EP0_STG_START_INT		BIT(1)
94 #define         USBF_EP0_STG_END_INT		BIT(2)
95 #define         USBF_EP0_STALL_INT		BIT(3)
96 #define         USBF_EP0_IN_INT			BIT(4)
97 #define         USBF_EP0_OUT_INT		BIT(5)
98 #define         USBF_EP0_OUT_OR_INT		BIT(6)
99 #define         USBF_EP0_OUT_NULL_INT		BIT(7)
100 #define         USBF_EP0_IN_EMPTY		BIT(8)
101 #define         USBF_EP0_IN_FULL		BIT(9)
102 #define         USBF_EP0_IN_DATA		BIT(10)
103 #define         USBF_EP0_IN_NAK_INT		BIT(11)
104 #define         USBF_EP0_OUT_EMPTY		BIT(12)
105 #define         USBF_EP0_OUT_FULL		BIT(13)
106 #define         USBF_EP0_OUT_NULL		BIT(14)
107 #define         USBF_EP0_OUT_NAK_INT		BIT(15)
108 #define         USBF_EP0_PERR_NAK_INT		BIT(16)
109 #define         USBF_EP0_PERR_NAK		BIT(17)
110 #define         USBF_EP0_PID			BIT(18)
111 
112 #define     USBF_REG_EP0_INT_ENA	0x08
113 #define         USBF_EP0_SETUP_EN		BIT(0)
114 #define         USBF_EP0_STG_START_EN		BIT(1)
115 #define         USBF_EP0_STG_END_EN		BIT(2)
116 #define         USBF_EP0_STALL_EN		BIT(3)
117 #define         USBF_EP0_IN_EN			BIT(4)
118 #define         USBF_EP0_OUT_EN			BIT(5)
119 #define         USBF_EP0_OUT_OR_EN		BIT(6)
120 #define         USBF_EP0_OUT_NULL_EN		BIT(7)
121 #define         USBF_EP0_IN_NAK_EN		BIT(11)
122 #define         USBF_EP0_OUT_NAK_EN		BIT(15)
123 #define         USBF_EP0_PERR_NAK_EN		BIT(16)
124 
125 #define     USBF_REG_EP0_LENGTH		0x0C
126 #define         USBF_EP0_LDATA			(0x7FF << 0)
127 #define     USBF_REG_EP0_READ		0x10
128 #define     USBF_REG_EP0_WRITE		0x14
129 
130 #define USBF_BASE_EPN(_n)	(0x040 + (_n) * 0x020)
131 /* EPn register offsets from Base + USBF_BASE_EPN(n-1). n=1..15 */
132 #define     USBF_REG_EPN_CONTROL	0x000
133 #define         USBF_EPN_ONAK			BIT(0)
134 #define         USBF_EPN_OSTL			BIT(2)
135 #define         USBF_EPN_ISTL			BIT(3)
136 #define         USBF_EPN_OSTL_EN		BIT(4)
137 #define         USBF_EPN_DW_MASK		(0x3 << 5)
138 #define         USBF_EPN_DW(_s)			((_s) << 5)
139 #define         USBF_EPN_DEND			BIT(7)
140 #define         USBF_EPN_CBCLR			BIT(8)
141 #define         USBF_EPN_BCLR			BIT(9)
142 #define         USBF_EPN_OPIDCLR		BIT(10)
143 #define         USBF_EPN_IPIDCLR		BIT(11)
144 #define         USBF_EPN_AUTO			BIT(16)
145 #define         USBF_EPN_OVERSEL		BIT(17)
146 #define         USBF_EPN_MODE_MASK		(0x3 << 24)
147 #define         USBF_EPN_MODE_BULK		(0x0 << 24)
148 #define         USBF_EPN_MODE_INTR		(0x1 << 24)
149 #define         USBF_EPN_MODE_ISO		(0x2 << 24)
150 #define         USBF_EPN_DIR0			BIT(26)
151 #define         USBF_EPN_BUF_TYPE_DOUBLE	BIT(30)
152 #define         USBF_EPN_EN			BIT(31)
153 
154 #define     USBF_REG_EPN_STATUS		0x004
155 #define         USBF_EPN_IN_EMPTY		BIT(0)
156 #define         USBF_EPN_IN_FULL		BIT(1)
157 #define         USBF_EPN_IN_DATA		BIT(2)
158 #define         USBF_EPN_IN_INT			BIT(3)
159 #define         USBF_EPN_IN_STALL_INT		BIT(4)
160 #define         USBF_EPN_IN_NAK_ERR_INT		BIT(5)
161 #define         USBF_EPN_IN_END_INT		BIT(7)
162 #define         USBF_EPN_IPID			BIT(10)
163 #define         USBF_EPN_OUT_EMPTY		BIT(16)
164 #define         USBF_EPN_OUT_FULL		BIT(17)
165 #define         USBF_EPN_OUT_NULL_INT		BIT(18)
166 #define         USBF_EPN_OUT_INT		BIT(19)
167 #define         USBF_EPN_OUT_STALL_INT		BIT(20)
168 #define         USBF_EPN_OUT_NAK_ERR_INT	BIT(21)
169 #define         USBF_EPN_OUT_OR_INT		BIT(22)
170 #define         USBF_EPN_OUT_END_INT		BIT(23)
171 #define         USBF_EPN_ISO_CRC		BIT(24)
172 #define         USBF_EPN_ISO_OR			BIT(26)
173 #define         USBF_EPN_OUT_NOTKN		BIT(27)
174 #define         USBF_EPN_ISO_OPID		BIT(28)
175 #define         USBF_EPN_ISO_PIDERR		BIT(29)
176 
177 #define     USBF_REG_EPN_INT_ENA	0x008
178 #define         USBF_EPN_IN_EN			BIT(3)
179 #define         USBF_EPN_IN_STALL_EN		BIT(4)
180 #define         USBF_EPN_IN_NAK_ERR_EN		BIT(5)
181 #define         USBF_EPN_IN_END_EN		BIT(7)
182 #define         USBF_EPN_OUT_NULL_EN		BIT(18)
183 #define         USBF_EPN_OUT_EN			BIT(19)
184 #define         USBF_EPN_OUT_STALL_EN		BIT(20)
185 #define         USBF_EPN_OUT_NAK_ERR_EN		BIT(21)
186 #define         USBF_EPN_OUT_OR_EN		BIT(22)
187 #define         USBF_EPN_OUT_END_EN		BIT(23)
188 
189 #define     USBF_REG_EPN_DMA_CTRL	0x00C
190 #define         USBF_EPN_DMAMODE0		BIT(0)
191 #define         USBF_EPN_DMA_EN			BIT(4)
192 #define         USBF_EPN_STOP_SET		BIT(8)
193 #define         USBF_EPN_BURST_SET		BIT(9)
194 #define         USBF_EPN_DEND_SET		BIT(10)
195 #define         USBF_EPN_STOP_MODE		BIT(11)
196 
197 #define     USBF_REG_EPN_PCKT_ADRS	0x010
198 #define         USBF_EPN_MPKT(_l)		((_l) << 0)
199 #define         USBF_EPN_BASEAD(_a)		((_a) << 16)
200 
201 #define     USBF_REG_EPN_LEN_DCNT	0x014
202 #define         USBF_EPN_GET_LDATA(_r)		((_r) & 0x7FF)
203 #define         USBF_EPN_SET_DMACNT(_c)		((_c) << 16)
204 #define         USBF_EPN_GET_DMACNT(_r)		(((_r) >> 16) & 0x1ff)
205 
206 #define     USBF_REG_EPN_READ		0x018
207 #define     USBF_REG_EPN_WRITE		0x01C
208 
209 /* AHB-EPC Bridge registers */
210 #define USBF_REG_AHBSCTR	0x1000
211 #define USBF_REG_AHBMCTR	0x1004
212 #define     USBF_SYS_WBURST_TYPE	BIT(2)
213 #define     USBF_SYS_ARBITER_CTR	BIT(31)
214 
215 #define USBF_REG_AHBBINT	0x1008
216 #define     USBF_SYS_ERR_MASTER		 (0x0F << 0)
217 #define     USBF_SYS_SBUS_ERRINT0	 BIT(4)
218 #define     USBF_SYS_SBUS_ERRINT1	 BIT(5)
219 #define     USBF_SYS_MBUS_ERRINT	 BIT(6)
220 #define     USBF_SYS_VBUS_INT		 BIT(13)
221 #define     USBF_SYS_DMA_ENDINT_EPN(_n)	 (BIT(16) << (_n)) /* _n=1..15 */
222 
223 #define USBF_REG_AHBBINTEN	0x100C
224 #define     USBF_SYS_SBUS_ERRINT0EN	  BIT(4)
225 #define     USBF_SYS_SBUS_ERRINT1EN	  BIT(5)
226 #define     USBF_SYS_MBUS_ERRINTEN	  BIT(6)
227 #define     USBF_SYS_VBUS_INTEN		  BIT(13)
228 #define     USBF_SYS_DMA_ENDINTEN_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
229 
230 #define USBF_REG_EPCTR		0x1010
231 #define     USBF_SYS_EPC_RST		BIT(0)
232 #define     USBF_SYS_PLL_RST		BIT(2)
233 #define     USBF_SYS_PLL_LOCK		BIT(4)
234 #define     USBF_SYS_PLL_RESUME		BIT(5)
235 #define     USBF_SYS_VBUS_LEVEL		BIT(8)
236 #define     USBF_SYS_DIRPD		BIT(12)
237 
238 #define USBF_REG_USBSSVER	0x1020
239 #define USBF_REG_USBSSCONF	0x1024
240 #define    USBF_SYS_DMA_AVAILABLE(_n)	(BIT(0) << (_n)) /* _n=0..15 */
241 #define    USBF_SYS_EP_AVAILABLE(_n)	(BIT(16) << (_n)) /* _n=0..15 */
242 
243 #define USBF_BASE_DMA_EPN(_n)	(0x1110 + (_n) * 0x010)
244 /* EPn DMA register offsets from Base + USBF_BASE_DMA_EPN(n-1). n=1..15 */
245 #define     USBF_REG_DMA_EPN_DCR1	0x00
246 #define         USBF_SYS_EPN_REQEN		BIT(0)
247 #define         USBF_SYS_EPN_DIR0		BIT(1)
248 #define         USBF_SYS_EPN_SET_DMACNT(_c)	((_c) << 16)
249 #define         USBF_SYS_EPN_GET_DMACNT(_r)	(((_r) >> 16) & 0x0FF)
250 
251 #define     USBF_REG_DMA_EPN_DCR2	0x04
252 #define         USBF_SYS_EPN_MPKT(_s)		((_s) << 0)
253 #define         USBF_SYS_EPN_LMPKT(_l)		((_l) << 16)
254 
255 #define     USBF_REG_DMA_EPN_TADR	0x08
256 
257 /* USB request */
258 struct usbf_req {
259 	struct usb_request	req;
260 	struct list_head	queue;
261 	unsigned int		is_zero_sent : 1;
262 	unsigned int		is_mapped : 1;
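	/* xfer_step tracks where a DMA-assisted transfer currently is:
	 * USBF_XFER_START programs the next chunk, USBF_XFER_WAIT_DMA and
	 * USBF_XFER_WAIT_DMA_SHORT wait for the endpoint DMA end interrupt,
	 * USBF_XFER_SEND_NULL queues a zero-length packet, USBF_XFER_WAIT_END
	 * waits for the final IN/OUT interrupt and USBF_XFER_WAIT_BRIDGE waits
	 * for the AHB-EPC bridge DMA end interrupt (see usbf_epn_dma_in() and
	 * usbf_epn_dma_out()).
	 */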
263 	enum {
264 		USBF_XFER_START,
265 		USBF_XFER_WAIT_DMA,
266 		USBF_XFER_SEND_NULL,
267 		USBF_XFER_WAIT_END,
268 		USBF_XFER_WAIT_DMA_SHORT,
269 		USBF_XFER_WAIT_BRIDGE,
270 	}			xfer_step;
271 	size_t			dma_size;
272 };
273 
274 /* USB Endpoint */
275 struct usbf_ep {
276 	struct usb_ep		ep;
277 	char			name[32];
278 	struct list_head	queue;
279 	unsigned int		is_processing : 1;
280 	unsigned int		is_in : 1;
281 	struct			usbf_udc *udc;
282 	void __iomem		*regs;
283 	void __iomem		*dma_regs;
284 	unsigned int		id : 8;
285 	unsigned int		disabled : 1;
286 	unsigned int		is_wedged : 1;
287 	unsigned int		delayed_status : 1;
288 	u32			status;
289 	void			(*bridge_on_dma_end)(struct usbf_ep *ep);
290 };
291 
292 enum usbf_ep0state {
293 	EP0_IDLE,
294 	EP0_IN_DATA_PHASE,
295 	EP0_OUT_DATA_PHASE,
296 	EP0_OUT_STATUS_START_PHASE,
297 	EP0_OUT_STATUS_PHASE,
298 	EP0_OUT_STATUS_END_PHASE,
299 	EP0_IN_STATUS_START_PHASE,
300 	EP0_IN_STATUS_PHASE,
301 	EP0_IN_STATUS_END_PHASE,
302 };
303 
304 struct usbf_udc {
305 	struct usb_gadget		gadget;
306 	struct usb_gadget_driver	*driver;
307 	struct device			*dev;
308 	void __iomem			*regs;
309 	spinlock_t			lock;
310 	bool				is_remote_wakeup;
311 	bool				is_usb_suspended;
312 	struct usbf_ep			ep[USBF_NUM_ENDPOINTS];
313 	/* for EP0 control messages */
314 	enum usbf_ep0state		ep0state;
315 	struct usbf_req			setup_reply;
316 	u8				ep0_buf[USBF_EP0_MAX_PCKT_SIZE];
317 };
318 
319 struct usbf_ep_info {
320 	const char		*name;
321 	struct usb_ep_caps	caps;
322 	u16			base_addr;
323 	unsigned int		is_double : 1;
324 	u16			maxpacket_limit;
325 };
326 
327 #define USBF_SINGLE_BUFFER 0
328 #define USBF_DOUBLE_BUFFER 1
329 #define USBF_EP_INFO(_name, _caps, _base_addr, _is_double, _maxpacket_limit)  \
330 	{                                                                     \
331 		.name            = _name,                                     \
332 		.caps            = _caps,                                     \
333 		.base_addr       = _base_addr,                                \
334 		.is_double       = _is_double,                                \
335 		.maxpacket_limit = _maxpacket_limit,                          \
336 	}
337 
338 /* This table is computed from the recommended values provided in the SoC
339  * datasheet. The buffer type (single/double) and the endpoint type cannot
340  * be changed. The mapping in internal RAM (base_addr and number of words)
341  * for each endpoint depends on the max packet size and the buffer type.
342  */
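/* For instance, ep1 uses a double 512-byte buffer: 512 * 2 / 4 = 256 words
 * (0x100), so ep2 starts at 0x0020 + 0x100 = 0x0120. A single 512-byte buffer
 * such as ep3's needs only 512 * 1 / 4 = 128 words (0x80), hence ep4 at 0x02A0.
 */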
343 static const struct usbf_ep_info usbf_ep_info[USBF_NUM_ENDPOINTS] = {
344 	/* ep0: buf @0x0000 64 bytes, fixed 32 words */
345 	[0] = USBF_EP_INFO("ep0-ctrl",
346 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
347 				       USB_EP_CAPS_DIR_ALL),
348 			   0x0000, USBF_SINGLE_BUFFER, USBF_EP0_MAX_PCKT_SIZE),
349 	/* ep1: buf @0x0020, 2 buffers 512 bytes -> (512 * 2 / 4) words */
350 	[1] = USBF_EP_INFO("ep1-bulk",
351 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
352 				       USB_EP_CAPS_DIR_ALL),
353 			   0x0020, USBF_DOUBLE_BUFFER, 512),
354 	/* ep2: buf @0x0120, 2 buffers 512 bytes -> (512 * 2 / 4) words */
355 	[2] = USBF_EP_INFO("ep2-bulk",
356 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
357 				       USB_EP_CAPS_DIR_ALL),
358 			   0x0120, USBF_DOUBLE_BUFFER, 512),
359 	/* ep3: buf @0x0220, 1 buffer 512 bytes -> (512 * 1 / 4) words */
360 	[3] = USBF_EP_INFO("ep3-bulk",
361 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
362 				       USB_EP_CAPS_DIR_ALL),
363 			   0x0220, USBF_SINGLE_BUFFER, 512),
364 	/* ep4: buf @0x02A0, 1 buffer 512 bytes -> (512 * 1 / 4) words */
365 	[4] = USBF_EP_INFO("ep4-bulk",
366 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
367 				       USB_EP_CAPS_DIR_ALL),
368 			   0x02A0, USBF_SINGLE_BUFFER, 512),
369 	/* ep5: buf @0x0320, 1 buffer 512 bytes -> (512 * 1 / 4) words */
370 	[5] = USBF_EP_INFO("ep5-bulk",
371 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
372 				       USB_EP_CAPS_DIR_ALL),
373 			   0x0320, USBF_SINGLE_BUFFER, 512),
374 	/* ep6: buf @0x03A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
375 	[6] = USBF_EP_INFO("ep6-int",
376 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
377 				       USB_EP_CAPS_DIR_ALL),
378 			   0x03A0, USBF_SINGLE_BUFFER, 1024),
379 	/* ep7: buf @0x04A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
380 	[7] = USBF_EP_INFO("ep7-int",
381 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
382 				       USB_EP_CAPS_DIR_ALL),
383 			   0x04A0, USBF_SINGLE_BUFFER, 1024),
384 	/* ep8: buf @0x0520, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
385 	[8] = USBF_EP_INFO("ep8-int",
386 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
387 				       USB_EP_CAPS_DIR_ALL),
388 			   0x0520, USBF_SINGLE_BUFFER, 1024),
389 	/* ep9: buf @0x0620, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
390 	[9] = USBF_EP_INFO("ep9-int",
391 			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
392 				       USB_EP_CAPS_DIR_ALL),
393 			   0x0620, USBF_SINGLE_BUFFER, 1024),
394 	/* ep10: buf @0x0720, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
395 	[10] = USBF_EP_INFO("ep10-iso",
396 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
397 					USB_EP_CAPS_DIR_ALL),
398 			    0x0720, USBF_DOUBLE_BUFFER, 1024),
399 	/* ep11: buf @0x0920, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
400 	[11] = USBF_EP_INFO("ep11-iso",
401 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
402 					USB_EP_CAPS_DIR_ALL),
403 			    0x0920, USBF_DOUBLE_BUFFER, 1024),
404 	/* ep12: buf @0x0B20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
405 	[12] = USBF_EP_INFO("ep12-iso",
406 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
407 					USB_EP_CAPS_DIR_ALL),
408 			    0x0B20, USBF_DOUBLE_BUFFER, 1024),
409 	/* ep13: buf @0x0D20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
410 	[13] = USBF_EP_INFO("ep13-iso",
411 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
412 					USB_EP_CAPS_DIR_ALL),
413 			    0x0D20, USBF_DOUBLE_BUFFER, 1024),
414 	/* ep14: buf @0x0F20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
415 	[14] = USBF_EP_INFO("ep14-iso",
416 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
417 					USB_EP_CAPS_DIR_ALL),
418 			    0x0F20, USBF_DOUBLE_BUFFER, 1024),
419 	/* ep15: buf @0x1120, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
420 	[15] = USBF_EP_INFO("ep15-iso",
421 			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
422 					USB_EP_CAPS_DIR_ALL),
423 			    0x1120, USBF_DOUBLE_BUFFER, 1024),
424 };
425 
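/*
 * Register accessors: udc->regs maps the whole controller, while each endpoint
 * keeps pointers into its own register windows (ep->regs for the per-endpoint
 * EPC area at USBF_BASE_EP0/USBF_BASE_EPN() and ep->dma_regs for the AHB-EPC
 * bridge DMA area at USBF_BASE_DMA_EPN()), so the helpers below only add the
 * small per-register offsets defined above.
 */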
426 static inline u32 usbf_reg_readl(struct usbf_udc *udc, uint offset)
427 {
428 	return readl(udc->regs + offset);
429 }
430 
431 static inline void usbf_reg_writel(struct usbf_udc *udc, uint offset, u32 val)
432 {
433 	writel(val, udc->regs + offset);
434 }
435 
436 static inline void usbf_reg_bitset(struct usbf_udc *udc, uint offset, u32 set)
437 {
438 	u32 tmp;
439 
440 	tmp = usbf_reg_readl(udc, offset);
441 	tmp |= set;
442 	usbf_reg_writel(udc, offset, tmp);
443 }
444 
445 static inline void usbf_reg_bitclr(struct usbf_udc *udc, uint offset, u32 clr)
446 {
447 	u32 tmp;
448 
449 	tmp = usbf_reg_readl(udc, offset);
450 	tmp &= ~clr;
451 	usbf_reg_writel(udc, offset, tmp);
452 }
453 
454 static inline void usbf_reg_clrset(struct usbf_udc *udc, uint offset,
455 				   u32 clr, u32 set)
456 {
457 	u32 tmp;
458 
459 	tmp = usbf_reg_readl(udc, offset);
460 	tmp &= ~clr;
461 	tmp |= set;
462 	usbf_reg_writel(udc, offset, tmp);
463 }
464 
465 static inline u32 usbf_ep_reg_readl(struct usbf_ep *ep, uint offset)
466 {
467 	return readl(ep->regs + offset);
468 }
469 
470 static inline void usbf_ep_reg_read_rep(struct usbf_ep *ep, uint offset,
471 				       void *dst, uint count)
472 {
473 	readsl(ep->regs + offset, dst, count);
474 }
475 
476 static inline void usbf_ep_reg_writel(struct usbf_ep *ep, uint offset, u32 val)
477 {
478 	writel(val, ep->regs + offset);
479 }
480 
481 static inline void usbf_ep_reg_write_rep(struct usbf_ep *ep, uint offset,
482 					 const void *src, uint count)
483 {
484 	writesl(ep->regs + offset, src, count);
485 }
486 
487 static inline void usbf_ep_reg_bitset(struct usbf_ep *ep, uint offset, u32 set)
488 {
489 	u32 tmp;
490 
491 	tmp = usbf_ep_reg_readl(ep, offset);
492 	tmp |= set;
493 	usbf_ep_reg_writel(ep, offset, tmp);
494 }
495 
496 static inline void usbf_ep_reg_bitclr(struct usbf_ep *ep, uint offset, u32 clr)
497 {
498 	u32 tmp;
499 
500 	tmp = usbf_ep_reg_readl(ep, offset);
501 	tmp &= ~clr;
502 	usbf_ep_reg_writel(ep, offset, tmp);
503 }
504 
505 static inline void usbf_ep_reg_clrset(struct usbf_ep *ep, uint offset,
506 				      u32 clr, u32 set)
507 {
508 	u32 tmp;
509 
510 	tmp = usbf_ep_reg_readl(ep, offset);
511 	tmp &= ~clr;
512 	tmp |= set;
513 	usbf_ep_reg_writel(ep, offset, tmp);
514 }
515 
516 static inline u32 usbf_ep_dma_reg_readl(struct usbf_ep *ep, uint offset)
517 {
518 	return readl(ep->dma_regs + offset);
519 }
520 
521 static inline void usbf_ep_dma_reg_writel(struct usbf_ep *ep, uint offset,
522 					  u32 val)
523 {
524 	writel(val, ep->dma_regs + offset);
525 }
526 
527 static inline void usbf_ep_dma_reg_bitset(struct usbf_ep *ep, uint offset,
528 					  u32 set)
529 {
530 	u32 tmp;
531 
532 	tmp = usbf_ep_dma_reg_readl(ep, offset);
533 	tmp |= set;
534 	usbf_ep_dma_reg_writel(ep, offset, tmp);
535 }
536 
537 static inline void usbf_ep_dma_reg_bitclr(struct usbf_ep *ep, uint offset,
538 					  u32 clr)
539 {
540 	u32 tmp;
541 
542 	tmp = usbf_ep_dma_reg_readl(ep, offset);
543 	tmp &= ~clr;
544 	usbf_ep_dma_reg_writel(ep, offset, tmp);
545 }
546 
547 static void usbf_ep0_send_null(struct usbf_ep *ep0, bool is_data1)
548 {
549 	u32 set;
550 
551 	set = USBF_EP0_DEND;
552 	if (is_data1)
553 		set |= USBF_EP0_PIDCLR;
554 
555 	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, set);
556 }
557 
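/* Push at most one max-packet of IN data into the EP0 FIFO.
 * Returns -EINPROGRESS while more data (or a trailing zero-length packet)
 * still has to be sent, 0 once the whole request has been written.
 */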
558 static int usbf_ep0_pio_in(struct usbf_ep *ep0, struct usbf_req *req)
559 {
560 	unsigned int left;
561 	unsigned int nb;
562 	const void *buf;
563 	u32 ctrl;
564 	u32 last;
565 
566 	left = req->req.length - req->req.actual;
567 
568 	if (left == 0) {
569 		if (!req->is_zero_sent) {
570 			if (req->req.length == 0) {
571 				dev_dbg(ep0->udc->dev, "ep0 send null\n");
572 				usbf_ep0_send_null(ep0, false);
573 				req->is_zero_sent = 1;
574 				return -EINPROGRESS;
575 			}
576 			if ((req->req.actual % ep0->ep.maxpacket) == 0) {
577 				if (req->req.zero) {
578 					dev_dbg(ep0->udc->dev, "ep0 send null\n");
579 					usbf_ep0_send_null(ep0, false);
580 					req->is_zero_sent = 1;
581 					return -EINPROGRESS;
582 				}
583 			}
584 		}
585 		return 0;
586 	}
587 
588 	if (left > ep0->ep.maxpacket)
589 		left = ep0->ep.maxpacket;
590 
591 	buf = req->req.buf;
592 	buf += req->req.actual;
593 
594 	nb = left / sizeof(u32);
595 	if (nb) {
596 		usbf_ep_reg_write_rep(ep0, USBF_REG_EP0_WRITE, buf, nb);
597 		buf += (nb * sizeof(u32));
598 		req->req.actual += (nb * sizeof(u32));
599 		left -= (nb * sizeof(u32));
600 	}
601 	ctrl = usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL);
602 	ctrl &= ~USBF_EP0_DW_MASK;
603 	if (left) {
604 		memcpy(&last, buf, left);
605 		usbf_ep_reg_writel(ep0, USBF_REG_EP0_WRITE, last);
606 		ctrl |= USBF_EP0_DW(left);
607 		req->req.actual += left;
608 	}
609 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, ctrl | USBF_EP0_DEND);
610 
611 	dev_dbg(ep0->udc->dev, "ep0 send %u/%u\n",
612 		req->req.actual, req->req.length);
613 
614 	return -EINPROGRESS;
615 }
616 
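/* Read one received packet from the EP0 FIFO into the current request.
 * Returns 0 when the request is complete (short packet, exact length or
 * NULL packet received), -EINPROGRESS otherwise.
 */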
617 static int usbf_ep0_pio_out(struct usbf_ep *ep0, struct usbf_req *req)
618 {
619 	int req_status = 0;
620 	unsigned int count;
621 	unsigned int recv;
622 	unsigned int left;
623 	unsigned int nb;
624 	void *buf;
625 	u32 last;
626 
627 	if (ep0->status & USBF_EP0_OUT_INT) {
628 		recv = usbf_ep_reg_readl(ep0, USBF_REG_EP0_LENGTH) & USBF_EP0_LDATA;
629 		count = recv;
630 
631 		buf = req->req.buf;
632 		buf += req->req.actual;
633 
634 		left = req->req.length - req->req.actual;
635 
636 		dev_dbg(ep0->udc->dev, "ep0 recv %u, left %u\n", count, left);
637 
638 		if (left > ep0->ep.maxpacket)
639 			left = ep0->ep.maxpacket;
640 
641 		if (count > left) {
642 			req_status = -EOVERFLOW;
643 			count = left;
644 		}
645 
646 		if (count) {
647 			nb = count / sizeof(u32);
648 			if (nb) {
649 				usbf_ep_reg_read_rep(ep0, USBF_REG_EP0_READ,
650 					buf, nb);
651 				buf += (nb * sizeof(u32));
652 				req->req.actual += (nb * sizeof(u32));
653 				count -= (nb * sizeof(u32));
654 			}
655 			if (count) {
656 				last = usbf_ep_reg_readl(ep0, USBF_REG_EP0_READ);
657 				memcpy(buf, &last, count);
658 				req->req.actual += count;
659 			}
660 		}
661 		dev_dbg(ep0->udc->dev, "ep0 recv %u/%u\n",
662 			req->req.actual, req->req.length);
663 
664 		if (req_status) {
665 			dev_dbg(ep0->udc->dev, "ep0 req.status=%d\n", req_status);
666 			req->req.status = req_status;
667 			return 0;
668 		}
669 
670 		if (recv < ep0->ep.maxpacket) {
671 			dev_dbg(ep0->udc->dev, "ep0 short packet\n");
672 			/* This is a short packet -> It is the end */
673 			req->req.status = 0;
674 			return 0;
675 		}
676 
677 		/* The Data stage of a control transfer from an endpoint to the
678 		 * host is complete when the endpoint does one of the following:
679 		 * - Has transferred exactly the expected amount of data
680 		 * - Transfers a packet with a payload size less than
681 		 *   wMaxPacketSize or transfers a zero-length packet
682 		 */
683 		if (req->req.actual == req->req.length) {
684 			req->req.status = 0;
685 			return 0;
686 		}
687 	}
688 
689 	if (ep0->status & USBF_EP0_OUT_NULL_INT) {
690 		/* NULL packet received */
691 		dev_dbg(ep0->udc->dev, "ep0 null packet\n");
692 		if (req->req.actual != req->req.length) {
693 			req->req.status = req->req.short_not_ok ?
694 					  -EREMOTEIO : 0;
695 		} else {
696 			req->req.status = 0;
697 		}
698 		return 0;
699 	}
700 
701 	return -EINPROGRESS;
702 }
703 
704 static void usbf_ep0_fifo_flush(struct usbf_ep *ep0)
705 {
706 	u32 sts;
707 	int ret;
708 
709 	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_BCLR);
710 
711 	ret = readl_poll_timeout_atomic(ep0->regs + USBF_REG_EP0_STATUS, sts,
712 		(sts & (USBF_EP0_IN_DATA | USBF_EP0_IN_EMPTY)) == USBF_EP0_IN_EMPTY,
713 		0,  10000);
714 	if (ret)
715 		dev_err(ep0->udc->dev, "ep0 flush fifo timed out\n");
716 
717 }
718 
719 static void usbf_epn_send_null(struct usbf_ep *epn)
720 {
721 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_DEND);
722 }
723 
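/* Send the last 1 to 3 bytes of an IN transfer: pack them into a single
 * 32-bit FIFO write, use the DW field to indicate how many bytes of that
 * word are valid, then set DEND to transmit the packet.
 */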
724 static void usbf_epn_send_residue(struct usbf_ep *epn, const void *buf,
725 				  unsigned int size)
726 {
727 	u32 tmp;
728 
729 	memcpy(&tmp, buf, size);
730 	usbf_ep_reg_writel(epn, USBF_REG_EPN_WRITE, tmp);
731 
732 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
733 				USBF_EPN_DW_MASK,
734 				USBF_EPN_DW(size) | USBF_EPN_DEND);
735 }
736 
737 static int usbf_epn_pio_in(struct usbf_ep *epn, struct usbf_req *req)
738 {
739 	unsigned int left;
740 	unsigned int nb;
741 	const void *buf;
742 
743 	left = req->req.length - req->req.actual;
744 
745 	if (left == 0) {
746 		if (!req->is_zero_sent) {
747 			if (req->req.length == 0) {
748 				dev_dbg(epn->udc->dev, "ep%u send_null\n", epn->id);
749 				usbf_epn_send_null(epn);
750 				req->is_zero_sent = 1;
751 				return -EINPROGRESS;
752 			}
753 			if ((req->req.actual % epn->ep.maxpacket) == 0) {
754 				if (req->req.zero) {
755 					dev_dbg(epn->udc->dev, "ep%u send_null\n",
756 						epn->id);
757 					usbf_epn_send_null(epn);
758 					req->is_zero_sent = 1;
759 					return -EINPROGRESS;
760 				}
761 			}
762 		}
763 		return 0;
764 	}
765 
766 	if (left > epn->ep.maxpacket)
767 		left = epn->ep.maxpacket;
768 
769 	buf = req->req.buf;
770 	buf += req->req.actual;
771 
772 	nb = left / sizeof(u32);
773 	if (nb) {
774 		usbf_ep_reg_write_rep(epn, USBF_REG_EPN_WRITE, buf, nb);
775 		buf += (nb * sizeof(u32));
776 		req->req.actual += (nb * sizeof(u32));
777 		left -= (nb * sizeof(u32));
778 	}
779 
780 	if (left) {
781 		usbf_epn_send_residue(epn, buf, left);
782 		req->req.actual += left;
783 	} else {
784 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
785 					USBF_EPN_DW_MASK,
786 					USBF_EPN_DEND);
787 	}
788 
789 	dev_dbg(epn->udc->dev, "ep%u send %u/%u\n", epn->id, req->req.actual,
790 		req->req.length);
791 
792 	return -EINPROGRESS;
793 }
794 
795 static void usbf_epn_enable_in_end_int(struct usbf_ep *epn)
796 {
797 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_END_EN);
798 }
799 
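/* IN transfer using the AHB-EPC bridge DMA. The 32-bit aligned part of the
 * request is handed to the bridge (USBF_XFER_START -> USBF_XFER_WAIT_DMA);
 * any residue, short packet or zero-length packet is then pushed by PIO
 * before waiting for the final IN interrupt (USBF_XFER_WAIT_END).
 * Unaligned buffers fall back to usbf_epn_pio_in().
 */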
800 static int usbf_epn_dma_in(struct usbf_ep *epn, struct usbf_req *req)
801 {
802 	unsigned int left;
803 	u32 npkt;
804 	u32 lastpkt;
805 	int ret;
806 
807 	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
808 		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
809 			epn->id);
810 		return usbf_epn_pio_in(epn, req);
811 	}
812 
813 	left = req->req.length - req->req.actual;
814 
815 	switch (req->xfer_step) {
816 	default:
817 	case USBF_XFER_START:
818 		if (left == 0) {
819 			dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
820 			usbf_epn_send_null(epn);
821 			req->xfer_step = USBF_XFER_WAIT_END;
822 			break;
823 		}
824 		if (left < 4) {
825 			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
826 				left);
827 			usbf_epn_send_residue(epn,
828 				req->req.buf + req->req.actual, left);
829 			req->req.actual += left;
830 			req->xfer_step = USBF_XFER_WAIT_END;
831 			break;
832 		}
833 
834 		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 1);
835 		if (ret < 0) {
836 			dev_err(epn->udc->dev, "usb_gadget_map_request failed (%d)\n",
837 				ret);
838 			return ret;
839 		}
840 		req->is_mapped = 1;
841 
842 		npkt = DIV_ROUND_UP(left, epn->ep.maxpacket);
843 		lastpkt = (left % epn->ep.maxpacket);
844 		if (lastpkt == 0)
845 			lastpkt = epn->ep.maxpacket;
846 		lastpkt &= ~0x3; /* DMA is done on 32bit units */
847 
848 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2,
849 			USBF_SYS_EPN_MPKT(epn->ep.maxpacket) | USBF_SYS_EPN_LMPKT(lastpkt));
850 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR,
851 			req->req.dma);
852 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
853 			USBF_SYS_EPN_SET_DMACNT(npkt));
854 		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
855 			USBF_SYS_EPN_REQEN);
856 
857 		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT, USBF_EPN_SET_DMACNT(npkt));
858 
859 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
860 
861 		/* The end of DMA transfer at the USBF level needs to be handled
862 		 * after the detection of the end of DMA transfer at the bridge
863 		 * level.
864 		 * To force this sequence, EPN_IN_END_EN will be set by the
865 		 * detection of the end of transfer at the bridge level (i.e. the
866 		 * bridge interrupt).
867 		 */
868 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
869 			USBF_EPN_IN_EN | USBF_EPN_IN_END_EN);
870 		epn->bridge_on_dma_end = usbf_epn_enable_in_end_int;
871 
872 		/* Clear any pending IN_END interrupt */
873 		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_END_INT);
874 
875 		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
876 			USBF_EPN_BURST_SET | USBF_EPN_DMAMODE0);
877 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
878 			USBF_EPN_DMA_EN);
879 
880 		req->dma_size = (npkt - 1) * epn->ep.maxpacket + lastpkt;
881 
882 		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu\n", epn->id,
883 			req->dma_size);
884 
885 		req->xfer_step = USBF_XFER_WAIT_DMA;
886 		break;
887 
888 	case USBF_XFER_WAIT_DMA:
889 		if (!(epn->status & USBF_EPN_IN_END_INT)) {
890 			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
891 			break;
892 		}
893 		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
894 
895 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 1);
896 		req->is_mapped = 0;
897 
898 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
899 
900 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
901 			USBF_EPN_IN_END_EN,
902 			USBF_EPN_IN_EN);
903 
904 		req->req.actual += req->dma_size;
905 
906 		left = req->req.length - req->req.actual;
907 		if (left) {
908 			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_INT);
909 
910 			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
911 				left);
912 			usbf_epn_send_residue(epn,
913 				req->req.buf + req->req.actual, left);
914 			req->req.actual += left;
915 			req->xfer_step = USBF_XFER_WAIT_END;
916 			break;
917 		}
918 
919 		if (req->req.actual % epn->ep.maxpacket) {
920 			/* last packet was a short packet. Tell the hardware to
921 			 * send it right now.
922 			 */
923 			dev_dbg(epn->udc->dev, "ep%u send short\n", epn->id);
924 			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
925 				~(u32)USBF_EPN_IN_INT);
926 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
927 				USBF_EPN_DEND);
928 
929 			req->xfer_step = USBF_XFER_WAIT_END;
930 			break;
931 		}
932 
933 		/* The last packet was exactly maxpacket sized.
934 		 * Send a null packet if needed.
935 		 */
936 		if (req->req.zero) {
937 			req->xfer_step = USBF_XFER_SEND_NULL;
938 			break;
939 		}
940 
941 		/* No more action to do. Wait for the end of the USB transfer */
942 		req->xfer_step = USBF_XFER_WAIT_END;
943 		break;
944 
945 	case USBF_XFER_SEND_NULL:
946 		dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
947 		usbf_epn_send_null(epn);
948 		req->xfer_step = USBF_XFER_WAIT_END;
949 		break;
950 
951 	case USBF_XFER_WAIT_END:
952 		if (!(epn->status & USBF_EPN_IN_INT)) {
953 			dev_dbg(epn->udc->dev, "ep%u end not done\n", epn->id);
954 			break;
955 		}
956 		dev_dbg(epn->udc->dev, "ep%u send done %u/%u\n", epn->id,
957 			req->req.actual, req->req.length);
958 		req->xfer_step = USBF_XFER_START;
959 		return 0;
960 	}
961 
962 	return -EINPROGRESS;
963 }
964 
965 static void usbf_epn_recv_residue(struct usbf_ep *epn, void *buf,
966 				  unsigned int size)
967 {
968 	u32 last;
969 
970 	last = usbf_ep_reg_readl(epn, USBF_REG_EPN_READ);
971 	memcpy(buf, &last, size);
972 }
973 
974 static int usbf_epn_pio_out(struct usbf_ep *epn, struct usbf_req *req)
975 {
976 	int req_status = 0;
977 	unsigned int count;
978 	unsigned int recv;
979 	unsigned int left;
980 	unsigned int nb;
981 	void *buf;
982 
983 	if (epn->status & USBF_EPN_OUT_INT) {
984 		recv = USBF_EPN_GET_LDATA(
985 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
986 		count = recv;
987 
988 		buf = req->req.buf;
989 		buf += req->req.actual;
990 
991 		left = req->req.length - req->req.actual;
992 
993 		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
994 			recv, left, epn->ep.maxpacket);
995 
996 		if (left > epn->ep.maxpacket)
997 			left = epn->ep.maxpacket;
998 
999 		if (count > left) {
1000 			req_status = -EOVERFLOW;
1001 			count = left;
1002 		}
1003 
1004 		if (count) {
1005 			nb = count / sizeof(u32);
1006 			if (nb) {
1007 				usbf_ep_reg_read_rep(epn, USBF_REG_EPN_READ,
1008 					buf, nb);
1009 				buf += (nb * sizeof(u32));
1010 				req->req.actual += (nb * sizeof(u32));
1011 				count -= (nb * sizeof(u32));
1012 			}
1013 			if (count) {
1014 				usbf_epn_recv_residue(epn, buf, count);
1015 				req->req.actual += count;
1016 			}
1017 		}
1018 		dev_dbg(epn->udc->dev, "ep%u recv %u/%u\n", epn->id,
1019 			req->req.actual, req->req.length);
1020 
1021 		if (req_status) {
1022 			dev_dbg(epn->udc->dev, "ep%u req.status=%d\n", epn->id,
1023 				req_status);
1024 			req->req.status = req_status;
1025 			return 0;
1026 		}
1027 
1028 		if (recv < epn->ep.maxpacket) {
1029 			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
1030 			/* This is a short packet -> It is the end */
1031 			req->req.status = 0;
1032 			return 0;
1033 		}
1034 
1035 		/* Request full -> complete */
1036 		if (req->req.actual == req->req.length) {
1037 			req->req.status = 0;
1038 			return 0;
1039 		}
1040 	}
1041 
1042 	if (epn->status & USBF_EPN_OUT_NULL_INT) {
1043 		/* NULL packet received */
1044 		dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
1045 		if (req->req.actual != req->req.length) {
1046 			req->req.status = req->req.short_not_ok ?
1047 					  -EREMOTEIO : 0;
1048 		} else {
1049 			req->req.status = 0;
1050 		}
1051 		return 0;
1052 	}
1053 
1054 	return -EINPROGRESS;
1055 }
1056 
1057 static void usbf_epn_enable_out_end_int(struct usbf_ep *epn)
1058 {
1059 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_OUT_END_EN);
1060 }
1061 
1062 static void usbf_epn_process_queue(struct usbf_ep *epn);
1063 
1064 static void usbf_epn_dma_out_send_dma(struct usbf_ep *epn, dma_addr_t addr, u32 npkt, bool is_short)
1065 {
1066 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2, USBF_SYS_EPN_MPKT(epn->ep.maxpacket));
1067 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR, addr);
1068 
1069 	if (is_short) {
1070 		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
1071 				USBF_SYS_EPN_SET_DMACNT(1) | USBF_SYS_EPN_DIR0);
1072 		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
1073 				USBF_SYS_EPN_REQEN);
1074 
1075 		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
1076 				USBF_EPN_SET_DMACNT(0));
1077 
1078 		/* The end of DMA transfer at the USBF level needs to be handled
1079 		 * after the detection of the end of DMA transfer at the bridge
1080 		 * level.
1081 		 * To force this sequence, enabling the OUT_END interrupt will
1082 		 * be done by the detection of the end of transfer at the bridge
1083 		 * level (i.e. the bridge interrupt).
1084 		 */
1085 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1086 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN | USBF_EPN_OUT_END_EN);
1087 		epn->bridge_on_dma_end = usbf_epn_enable_out_end_int;
1088 
1089 		/* Clear any pending OUT_END interrupt */
1090 		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1091 			~(u32)USBF_EPN_OUT_END_INT);
1092 
1093 		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
1094 			USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0);
1095 		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
1096 			USBF_EPN_DMA_EN);
1097 		return;
1098 	}
1099 
1100 	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
1101 		USBF_SYS_EPN_SET_DMACNT(npkt) | USBF_SYS_EPN_DIR0);
1102 	usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
1103 		USBF_SYS_EPN_REQEN);
1104 
1105 	usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
1106 		USBF_EPN_SET_DMACNT(npkt));
1107 
1108 	/* Here, the bridge may or may not generate an interrupt to signal the
1109 	 * end of DMA transfer.
1110 	 * Keep only the OUT_END interrupt and handle the bridge later during
1111 	 * the OUT_END processing.
1112 	 */
1113 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1114 		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN,
1115 		USBF_EPN_OUT_END_EN);
1116 
1117 	/* Disable the bridge interrupt. It will be re-enabled later */
1118 	usbf_reg_bitclr(epn->udc, USBF_REG_AHBBINTEN,
1119 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1120 
1121 	/* Clear any pending DMA_END interrupt at bridge level */
1122 	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT,
1123 		USBF_SYS_DMA_ENDINT_EPN(epn->id));
1124 
1125 	/* Clear any pending OUT_END interrupt */
1126 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1127 		~(u32)USBF_EPN_OUT_END_INT);
1128 
1129 	usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
1130 		USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0 | USBF_EPN_BURST_SET);
1131 	usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
1132 		USBF_EPN_DMA_EN);
1133 }
1134 
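/* Finish an OUT DMA transfer: restore the interrupt mask and, for a full
 * (non-short) transfer, return the number of bytes the bridge did not
 * receive (remaining DMACNT * maxpacket) so the caller can adjust
 * req->req.actual.
 */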
1135 static size_t usbf_epn_dma_out_complete_dma(struct usbf_ep *epn, bool is_short)
1136 {
1137 	u32 dmacnt;
1138 	u32 tmp;
1139 	int ret;
1140 
1141 	/* Restore interrupt mask */
1142 	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1143 		USBF_EPN_OUT_END_EN,
1144 		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1145 
1146 	if (is_short) {
1147 		/* Nothing more to do when the DMA was for a short packet */
1148 		return 0;
1149 	}
1150 
1151 	/* Enable the bridge interrupt */
1152 	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
1153 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1154 
1155 	tmp = usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT);
1156 	dmacnt = USBF_EPN_GET_DMACNT(tmp);
1157 
1158 	if (dmacnt) {
1159 		/* Some packets were not received (halted by a short or a null
1160 		 * packet).
1161 		 * The bridge never raises an interrupt in this case.
1162 		 * Wait for the end of the transfer at the bridge level.
1163 		 */
1164 		ret = readl_poll_timeout_atomic(
1165 			epn->dma_regs + USBF_REG_DMA_EPN_DCR1,
1166 			tmp, (USBF_SYS_EPN_GET_DMACNT(tmp) == dmacnt),
1167 			0,  10000);
1168 		if (ret) {
1169 			dev_err(epn->udc->dev, "ep%u wait bridge timed out\n",
1170 				epn->id);
1171 		}
1172 
1173 		usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1,
1174 			USBF_SYS_EPN_REQEN);
1175 
1176 		/* The dmacnt value tells how many packets were not transferred
1177 		 * out of the maximum number of packets we set for the DMA transfer.
1178 		 * Compute the left DMA size based on this value.
1179 		 */
1180 		return dmacnt * epn->ep.maxpacket;
1181 	}
1182 
1183 	return 0;
1184 }
1185 
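/* OUT transfer using the AHB-EPC bridge DMA. Full packets are received
 * through the bridge (USBF_XFER_WAIT_DMA); a trailing short packet goes
 * through a one-packet DMA (USBF_XFER_WAIT_DMA_SHORT) with its last 1-3
 * bytes read by PIO. Unaligned buffers fall back to usbf_epn_pio_out().
 */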
1186 static int usbf_epn_dma_out(struct usbf_ep *epn, struct usbf_req *req)
1187 {
1188 	unsigned int dma_left;
1189 	unsigned int count;
1190 	unsigned int recv;
1191 	unsigned int left;
1192 	u32 npkt;
1193 	int ret;
1194 
1195 	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
1196 		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
1197 			epn->id);
1198 		return usbf_epn_pio_out(epn, req);
1199 	}
1200 
1201 	switch (req->xfer_step) {
1202 	default:
1203 	case USBF_XFER_START:
1204 		if (epn->status & USBF_EPN_OUT_NULL_INT) {
1205 			dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
1206 			if (req->req.actual != req->req.length) {
1207 				req->req.status = req->req.short_not_ok ?
1208 					-EREMOTEIO : 0;
1209 			} else {
1210 				req->req.status = 0;
1211 			}
1212 			return 0;
1213 		}
1214 
1215 		if (!(epn->status & USBF_EPN_OUT_INT)) {
1216 			dev_dbg(epn->udc->dev, "ep%u OUT_INT not set -> spurious\n",
1217 				epn->id);
1218 			break;
1219 		}
1220 
1221 		recv = USBF_EPN_GET_LDATA(
1222 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1223 		if (!recv) {
1224 			dev_dbg(epn->udc->dev, "ep%u recv = 0 -> spurious\n",
1225 				epn->id);
1226 			break;
1227 		}
1228 
1229 		left = req->req.length - req->req.actual;
1230 
1231 		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
1232 			recv, left, epn->ep.maxpacket);
1233 
1234 		if (recv > left) {
1235 			dev_err(epn->udc->dev, "ep%u overflow (%u/%u)\n",
1236 				epn->id, recv, left);
1237 			req->req.status = -EOVERFLOW;
1238 			return -EOVERFLOW;
1239 		}
1240 
1241 		if (recv < epn->ep.maxpacket) {
1242 			/* Short packet received */
1243 			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
1244 			if (recv <= 3) {
1245 				usbf_epn_recv_residue(epn,
1246 					req->req.buf + req->req.actual, recv);
1247 				req->req.actual += recv;
1248 
1249 				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
1250 					epn->id, req->req.actual, req->req.length);
1251 
1252 				req->xfer_step = USBF_XFER_START;
1253 				return 0;
1254 			}
1255 
1256 			ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
1257 			if (ret < 0) {
1258 				dev_err(epn->udc->dev, "map request failed (%d)\n",
1259 					ret);
1260 				return ret;
1261 			}
1262 			req->is_mapped = 1;
1263 
1264 			usbf_epn_dma_out_send_dma(epn,
1265 				req->req.dma + req->req.actual,
1266 				1, true);
1267 			req->dma_size = recv & ~0x3;
1268 
1269 			dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n", epn->id,
1270 				req->dma_size);
1271 
1272 			req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
1273 			break;
1274 		}
1275 
1276 		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
1277 		if (ret < 0) {
1278 			dev_err(epn->udc->dev, "map request failed (%d)\n",
1279 				ret);
1280 			return ret;
1281 		}
1282 		req->is_mapped = 1;
1283 
1284 		/* Use the maximum DMA size according to the request buffer.
1285 		 * We will adjust the received size later at the end of the DMA
1286 		 * transfer with the left size computed from
1287 		 * usbf_epn_dma_out_complete_dma().
1288 		 */
1289 		npkt = left / epn->ep.maxpacket;
1290 		usbf_epn_dma_out_send_dma(epn,
1291 				req->req.dma + req->req.actual,
1292 				npkt, false);
1293 		req->dma_size = npkt * epn->ep.maxpacket;
1294 
1295 		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu (%u)\n", epn->id,
1296 			req->dma_size, npkt);
1297 
1298 		req->xfer_step = USBF_XFER_WAIT_DMA;
1299 		break;
1300 
1301 	case USBF_XFER_WAIT_DMA_SHORT:
1302 		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
1303 			dev_dbg(epn->udc->dev, "ep%u dma short not done\n", epn->id);
1304 			break;
1305 		}
1306 		dev_dbg(epn->udc->dev, "ep%u dma short done\n", epn->id);
1307 
1308 		usbf_epn_dma_out_complete_dma(epn, true);
1309 
1310 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1311 		req->is_mapped = 0;
1312 
1313 		req->req.actual += req->dma_size;
1314 
1315 		recv = USBF_EPN_GET_LDATA(
1316 			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1317 
1318 		count = recv & 0x3;
1319 		if (count) {
1320 			dev_dbg(epn->udc->dev, "ep%u recv residue %u\n", epn->id,
1321 				count);
1322 			usbf_epn_recv_residue(epn,
1323 				req->req.buf + req->req.actual, count);
1324 			req->req.actual += count;
1325 		}
1326 
1327 		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1328 			req->req.actual, req->req.length);
1329 
1330 		req->xfer_step = USBF_XFER_START;
1331 		return 0;
1332 
1333 	case USBF_XFER_WAIT_DMA:
1334 		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
1335 			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
1336 			break;
1337 		}
1338 		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
1339 
1340 		dma_left = usbf_epn_dma_out_complete_dma(epn, false);
1341 		if (dma_left) {
1342 			/* Adjust the final DMA size with the amount not transferred */
1343 			count = req->dma_size - dma_left;
1344 
1345 			dev_dbg(epn->udc->dev, "ep%u dma xfer done %u\n", epn->id,
1346 				count);
1347 
1348 			req->req.actual += count;
1349 
1350 			if (epn->status & USBF_EPN_OUT_NULL_INT) {
1351 				/* DMA was stopped by a null packet reception */
1352 				dev_dbg(epn->udc->dev, "ep%u dma stopped by null pckt\n",
1353 					epn->id);
1354 				usb_gadget_unmap_request(&epn->udc->gadget,
1355 							 &req->req, 0);
1356 				req->is_mapped = 0;
1357 
1358 				usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1359 					~(u32)USBF_EPN_OUT_NULL_INT);
1360 
1361 				if (req->req.actual != req->req.length) {
1362 					req->req.status = req->req.short_not_ok ?
1363 						  -EREMOTEIO : 0;
1364 				} else {
1365 					req->req.status = 0;
1366 				}
1367 				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
1368 					epn->id, req->req.actual, req->req.length);
1369 				req->xfer_step = USBF_XFER_START;
1370 				return 0;
1371 			}
1372 
1373 			recv = USBF_EPN_GET_LDATA(
1374 				usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
1375 			left = req->req.length - req->req.actual;
1376 			if (recv > left) {
1377 				dev_err(epn->udc->dev,
1378 					"ep%u overflow (%u/%u)\n", epn->id,
1379 					recv, left);
1380 				req->req.status = -EOVERFLOW;
1381 				usb_gadget_unmap_request(&epn->udc->gadget,
1382 							 &req->req, 0);
1383 				req->is_mapped = 0;
1384 
1385 				req->xfer_step = USBF_XFER_START;
1386 				return -EOVERFLOW;
1387 			}
1388 
1389 			if (recv > 3) {
1390 				usbf_epn_dma_out_send_dma(epn,
1391 					req->req.dma + req->req.actual,
1392 					1, true);
1393 				req->dma_size = recv & ~0x3;
1394 
1395 				dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n",
1396 					epn->id, req->dma_size);
1397 
1398 				req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
1399 				break;
1400 			}
1401 
1402 			usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1403 			req->is_mapped = 0;
1404 
1405 			count = recv & 0x3;
1406 			if (count) {
1407 				dev_dbg(epn->udc->dev, "ep%u recv residue %u\n",
1408 					epn->id, count);
1409 				usbf_epn_recv_residue(epn,
1410 					req->req.buf + req->req.actual, count);
1411 				req->req.actual += count;
1412 			}
1413 
1414 			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1415 				req->req.actual, req->req.length);
1416 
1417 			req->xfer_step = USBF_XFER_START;
1418 			return 0;
1419 		}
1420 
1421 		/* Process queue at bridge interrupt only */
1422 		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1423 			USBF_EPN_OUT_END_EN | USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1424 		epn->status = 0;
1425 		epn->bridge_on_dma_end = usbf_epn_process_queue;
1426 
1427 		req->xfer_step = USBF_XFER_WAIT_BRIDGE;
1428 		break;
1429 
1430 	case USBF_XFER_WAIT_BRIDGE:
1431 		dev_dbg(epn->udc->dev, "ep%u bridge transfers done\n", epn->id);
1432 
1433 		/* Restore interrupt mask */
1434 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1435 			USBF_EPN_OUT_END_EN,
1436 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1437 
1438 		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
1439 		req->is_mapped = 0;
1440 
1441 		req->req.actual += req->dma_size;
1442 
1443 		req->xfer_step = USBF_XFER_START;
1444 		left = req->req.length - req->req.actual;
1445 		if (!left) {
1446 			/* No more data can be added to the buffer */
1447 			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
1448 				req->req.actual, req->req.length);
1449 			return 0;
1450 		}
1451 		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u, wait more data\n",
1452 			epn->id, req->req.actual, req->req.length);
1453 		break;
1454 	}
1455 
1456 	return -EINPROGRESS;
1457 }
1458 
1459 static void usbf_epn_dma_stop(struct usbf_ep *epn)
1460 {
1461 	usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1, USBF_SYS_EPN_REQEN);
1462 
1463 	/* In the datasheet:
1464 	 *   If EP[m]_REQEN = 0b is set during DMA transfer, AHB-EPC stops DMA
1465 	 *   after 1 packet transfer completed.
1466 	 *   Therefore, wait sufficient time for ensuring DMA transfer
1467 	 *   completion. The WAIT time depends on the system, especially AHB
1468 	 *   bus activity
1469 	 * So arbitrary 10ms would be sufficient.
1470 	 */
1471 	mdelay(10);
1472 
1473 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_DMA_CTRL, USBF_EPN_DMA_EN);
1474 }
1475 
1476 static void usbf_epn_dma_abort(struct usbf_ep *epn, struct usbf_req *req)
1477 {
1478 	dev_dbg(epn->udc->dev, "ep%u %s dma abort\n", epn->id,
1479 		epn->is_in ? "in" : "out");
1480 
1481 	epn->bridge_on_dma_end = NULL;
1482 
1483 	usbf_epn_dma_stop(epn);
1484 
1485 	usb_gadget_unmap_request(&epn->udc->gadget, &req->req,
1486 				 epn->is_in ? 1 : 0);
1487 	req->is_mapped = 0;
1488 
1489 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
1490 
1491 	if (epn->is_in) {
1492 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1493 			USBF_EPN_IN_END_EN,
1494 			USBF_EPN_IN_EN);
1495 	} else {
1496 		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
1497 			USBF_EPN_OUT_END_EN,
1498 			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
1499 	}
1500 
1501 	/* As DMA is stopped, be sure that no DMA interrupts are pending */
1502 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
1503 		USBF_EPN_IN_END_INT | USBF_EPN_OUT_END_INT);
1504 
1505 	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT, USBF_SYS_DMA_ENDINT_EPN(epn->id));
1506 
1507 	/* Enable the DMA interrupt at the bridge level */
1508 	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
1509 		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
1510 
1511 	/* Reset transfer step */
1512 	req->xfer_step = USBF_XFER_START;
1513 }
1514 
1515 static void usbf_epn_fifo_flush(struct usbf_ep *epn)
1516 {
1517 	u32 ctrl;
1518 	u32 sts;
1519 	int ret;
1520 
1521 	dev_dbg(epn->udc->dev, "ep%u %s fifo flush\n", epn->id,
1522 		epn->is_in ? "in" : "out");
1523 
1524 	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
1525 	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl | USBF_EPN_BCLR);
1526 
1527 	if (ctrl & USBF_EPN_DIR0)
1528 		return;
1529 
1530 	ret = readl_poll_timeout_atomic(epn->regs + USBF_REG_EPN_STATUS, sts,
1531 		(sts & (USBF_EPN_IN_DATA | USBF_EPN_IN_EMPTY)) == USBF_EPN_IN_EMPTY,
1532 		0,  10000);
1533 	if (ret)
1534 		dev_err(epn->udc->dev, "ep%u flush fifo timed out\n", epn->id);
1535 }
1536 
1537 static void usbf_ep_req_done(struct usbf_ep *ep, struct usbf_req *req,
1538 			     int status)
1539 {
1540 	list_del_init(&req->queue);
1541 
1542 	if (status) {
1543 		req->req.status = status;
1544 	} else {
1545 		if (req->req.status == -EINPROGRESS)
1546 			req->req.status = status;
1547 	}
1548 
1549 	dev_dbg(ep->udc->dev, "ep%u %s req done length %u/%u, status=%d\n", ep->id,
1550 		ep->is_in ? "in" : "out",
1551 		req->req.actual, req->req.length, req->req.status);
1552 
1553 	if (req->is_mapped)
1554 		usbf_epn_dma_abort(ep, req);
1555 
1556 	spin_unlock(&ep->udc->lock);
1557 	usb_gadget_giveback_request(&ep->ep, &req->req);
1558 	spin_lock(&ep->udc->lock);
1559 }
1560 
1561 static void usbf_ep_nuke(struct usbf_ep *ep, int status)
1562 {
1563 	struct usbf_req *req;
1564 
1565 	dev_dbg(ep->udc->dev, "ep%u %s nuke status %d\n", ep->id,
1566 		ep->is_in ? "in" : "out",
1567 		status);
1568 
1569 	while (!list_empty(&ep->queue)) {
1570 		req = list_first_entry(&ep->queue, struct usbf_req, queue);
1571 		usbf_ep_req_done(ep, req, status);
1572 	}
1573 
1574 	if (ep->id == 0)
1575 		usbf_ep0_fifo_flush(ep);
1576 	else
1577 		usbf_epn_fifo_flush(ep);
1578 }
1579 
1580 static bool usbf_ep_is_stalled(struct usbf_ep *ep)
1581 {
1582 	u32 ctrl;
1583 
1584 	if (ep->id == 0) {
1585 		ctrl = usbf_ep_reg_readl(ep, USBF_REG_EP0_CONTROL);
1586 		return (ctrl & USBF_EP0_STL) ? true : false;
1587 	}
1588 
1589 	ctrl = usbf_ep_reg_readl(ep, USBF_REG_EPN_CONTROL);
1590 	if (ep->is_in)
1591 		return (ctrl & USBF_EPN_ISTL) ? true : false;
1592 
1593 	return (ctrl & USBF_EPN_OSTL) ? true : false;
1594 }
1595 
1596 static int usbf_epn_start_queue(struct usbf_ep *epn)
1597 {
1598 	struct usbf_req *req;
1599 	int ret;
1600 
1601 	if (usbf_ep_is_stalled(epn))
1602 		return 0;
1603 
1604 	req = list_first_entry_or_null(&epn->queue, struct usbf_req, queue);
1605 
1606 	if (epn->is_in) {
1607 		if (req && !epn->is_processing) {
1608 			ret = epn->dma_regs ?
1609 				usbf_epn_dma_in(epn, req) :
1610 				usbf_epn_pio_in(epn, req);
1611 			if (ret != -EINPROGRESS) {
1612 				dev_err(epn->udc->dev,
1613 					"queued next request not in progress\n");
1614 					/* The request cannot be completed (ie
1615 					 * ret == 0) on the first call.
1616 					 * stall and nuke the endpoint
1617 					 */
1618 				return ret ? ret : -EIO;
1619 			}
1620 		}
1621 	} else {
1622 		if (req) {
1623 			/* Clear ONAK to accept OUT tokens */
1624 			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL,
1625 				USBF_EPN_ONAK);
1626 
1627 			/* Enable interrupts */
1628 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA,
1629 				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1630 		} else {
1631 			/* Disable incoming data and interrupts.
1632 			 * They will be enabled on the next usb_ep_queue call
1633 			 */
1634 			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
1635 				USBF_EPN_ONAK);
1636 			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
1637 				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1638 		}
1639 	}
1640 	return 0;
1641 }
1642 
1643 static int usbf_ep_process_queue(struct usbf_ep *ep)
1644 {
1645 	int (*usbf_ep_xfer)(struct usbf_ep *ep, struct usbf_req *req);
1646 	struct usbf_req *req;
1647 	int is_processing;
1648 	int ret;
1649 
1650 	if (ep->is_in) {
1651 		usbf_ep_xfer = usbf_ep0_pio_in;
1652 		if (ep->id) {
1653 			usbf_ep_xfer = ep->dma_regs ?
1654 					usbf_epn_dma_in : usbf_epn_pio_in;
1655 		}
1656 	} else {
1657 		usbf_ep_xfer = usbf_ep0_pio_out;
1658 		if (ep->id) {
1659 			usbf_ep_xfer = ep->dma_regs ?
1660 					usbf_epn_dma_out : usbf_epn_pio_out;
1661 		}
1662 	}
1663 
1664 	req = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
1665 	if (!req) {
1666 		dev_err(ep->udc->dev,
1667 			"no request available for ep%u %s process\n", ep->id,
1668 			ep->is_in ? "in" : "out");
1669 		return -ENOENT;
1670 	}
1671 
1672 	do {
1673 		/* We are going to read the FIFO for the current request.
1674 		 * NAK any other incoming data to avoid a race condition if no
1675 		 * more requests are available.
1676 		 */
1677 		if (!ep->is_in && ep->id != 0) {
1678 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1679 				USBF_EPN_ONAK);
1680 		}
1681 
1682 		ret = usbf_ep_xfer(ep, req);
1683 		if (ret == -EINPROGRESS) {
1684 			if (!ep->is_in && ep->id != 0) {
1685 				/* The current request needs more data.
1686 				 * Allow incoming data
1687 				 */
1688 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
1689 					USBF_EPN_ONAK);
1690 			}
1691 			return ret;
1692 		}
1693 
1694 		is_processing = ep->is_processing;
1695 		ep->is_processing = 1;
1696 		usbf_ep_req_done(ep, req, ret);
1697 		ep->is_processing = is_processing;
1698 
1699 		if (ret) {
1700 			/* An error was detected during the request transfer.
1701 			 * Any pending DMA transfers were aborted by the
1702 			 * usbf_ep_req_done() call.
1703 			 * It's time to flush the fifo
1704 			 */
1705 			if (ep->id == 0)
1706 				usbf_ep0_fifo_flush(ep);
1707 			else
1708 				usbf_epn_fifo_flush(ep);
1709 		}
1710 
1711 		req = list_first_entry_or_null(&ep->queue, struct usbf_req,
1712 					       queue);
1713 
1714 		if (ep->is_in)
1715 			continue;
1716 
1717 		if (ep->id != 0) {
1718 			if (req) {
1719 				/* Another request is available.
1720 				 * Allow incoming data.
1721 				 */
1722 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
1723 					USBF_EPN_ONAK);
1724 			} else {
1725 				/* No request queued. Disable interrupts.
1726 				 * They will be enabled on the next usb_ep_queue call.
1727 				 */
1728 				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_INT_ENA,
1729 					USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
1730 			}
1731 		}
1732 		/* Do not recall usbf_ep_xfer() */
1733 		return req ? -EINPROGRESS : 0;
1734 
1735 	} while (req);
1736 
1737 	return 0;
1738 }
1739 
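/* Set or clear the halt (stall) condition of an endpoint. Clearing the halt
 * of a non-zero endpoint also aborts a possibly mapped DMA transfer, flushes
 * the FIFO, clears the data PID and restarts the request queue.
 */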
1740 static void usbf_ep_stall(struct usbf_ep *ep, bool stall)
1741 {
1742 	struct usbf_req *first;
1743 
1744 	dev_dbg(ep->udc->dev, "ep%u %s %s\n", ep->id,
1745 		ep->is_in ? "in" : "out",
1746 		stall ? "stall" : "unstall");
1747 
1748 	if (ep->id == 0) {
1749 		if (stall)
1750 			usbf_ep_reg_bitset(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
1751 		else
1752 			usbf_ep_reg_bitclr(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
1753 		return;
1754 	}
1755 
1756 	if (stall) {
1757 		if (ep->is_in)
1758 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1759 				USBF_EPN_ISTL);
1760 		else
1761 			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
1762 				USBF_EPN_OSTL | USBF_EPN_OSTL_EN);
1763 	} else {
1764 		first = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
1765 		if (first && first->is_mapped) {
1766 			/* This can happen if the host halts an endpoint using
1767 			 * SET_FEATURE and then un-halts the endpoint.
1768 			 */
1769 			usbf_epn_dma_abort(ep, first);
1770 		}
1771 		usbf_epn_fifo_flush(ep);
1772 		if (ep->is_in) {
1773 			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
1774 				USBF_EPN_ISTL,
1775 				USBF_EPN_IPIDCLR);
1776 		} else {
1777 			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
1778 				USBF_EPN_OSTL,
1779 				USBF_EPN_OSTL_EN | USBF_EPN_OPIDCLR);
1780 		}
1781 		usbf_epn_start_queue(ep);
1782 	}
1783 }
1784 
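/* Prepare ep0 for control transfers: clear the buffer, enable IN NAK
 * handling, unmask the ep0 interrupts and reset the control state machine
 * to idle.
 */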
1785 static void usbf_ep0_enable(struct usbf_ep *ep0)
1786 {
1787 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_INAK_EN | USBF_EP0_BCLR);
1788 
1789 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_INT_ENA,
1790 		USBF_EP0_SETUP_EN | USBF_EP0_STG_START_EN | USBF_EP0_STG_END_EN |
1791 		USBF_EP0_OUT_EN | USBF_EP0_OUT_NULL_EN | USBF_EP0_IN_EN);
1792 
1793 	ep0->udc->ep0state = EP0_IDLE;
1794 	ep0->disabled = 0;
1795 
1796 	/* enable interrupts for the ep0 */
1797 	usbf_reg_bitset(ep0->udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(0));
1798 }
1799 
1800 static int usbf_epn_enable(struct usbf_ep *epn)
1801 {
1802 	u32 base_addr;
1803 	u32 ctrl;
1804 
1805 	base_addr = usbf_ep_info[epn->id].base_addr;
1806 	usbf_ep_reg_writel(epn, USBF_REG_EPN_PCKT_ADRS,
1807 		USBF_EPN_BASEAD(base_addr) | USBF_EPN_MPKT(epn->ep.maxpacket));
1808 
1809 	/* OUT transfer interrupts are enabled during usb_ep_queue */
1810 	if (epn->is_in) {
1811 		/* Will be changed in DMA processing */
1812 		usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_EN);
1813 	}
1814 
1815 	/* Clear the buffer, set the endpoint direction, set IN/OUT STL, and
1816 	 * enable the endpoint. Send NAK for OUT data as requests are not queued yet.
1817 	 */
1818 	ctrl = USBF_EPN_EN | USBF_EPN_BCLR;
1819 	if (epn->is_in)
1820 		ctrl |= USBF_EPN_OSTL | USBF_EPN_OSTL_EN;
1821 	else
1822 		ctrl |= USBF_EPN_DIR0 | USBF_EPN_ISTL | USBF_EPN_OSTL_EN | USBF_EPN_ONAK;
1823 	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl);
1824 
1825 	return 0;
1826 }
1827 
1828 static int usbf_ep_enable(struct usb_ep *_ep,
1829 			  const struct usb_endpoint_descriptor *desc)
1830 {
1831 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1832 	struct usbf_udc *udc = ep->udc;
1833 	unsigned long flags;
1834 	int ret;
1835 
1836 	if (ep->id == 0)
1837 		return -EINVAL;
1838 
1839 	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
1840 		return -EINVAL;
1841 
1842 	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
1843 		usb_endpoint_dir_in(desc) ? "in" : "out",
1844 		usb_endpoint_maxp(desc));
1845 
1846 	spin_lock_irqsave(&ep->udc->lock, flags);
1847 	ep->is_in = usb_endpoint_dir_in(desc);
1848 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1849 
1850 	ret = usbf_epn_enable(ep);
1851 	if (ret)
1852 		goto end;
1853 
1854 	ep->disabled = 0;
1855 
1856 	/* enable interrupts for this endpoint */
1857 	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
1858 
1859 	/* enable DMA interrupt at bridge level if DMA is used */
1860 	if (ep->dma_regs) {
1861 		ep->bridge_on_dma_end = NULL;
1862 		usbf_reg_bitset(udc, USBF_REG_AHBBINTEN,
1863 			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
1864 	}
1865 
1866 	ret = 0;
1867 end:
1868 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1869 	return ret;
1870 }
1871 
1872 static int usbf_epn_disable(struct usbf_ep *epn)
1873 {
1874 	/* Disable interrupts */
1875 	usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, 0);
1876 
1877 	/* Disable endpoint */
1878 	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_EN);
1879 
1880 	/* remove anything that was pending */
1881 	usbf_ep_nuke(epn, -ESHUTDOWN);
1882 
1883 	return 0;
1884 }
1885 
1886 static int usbf_ep_disable(struct usb_ep *_ep)
1887 {
1888 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1889 	struct usbf_udc *udc = ep->udc;
1890 	unsigned long flags;
1891 	int ret;
1892 
1893 	if (ep->id == 0)
1894 		return -EINVAL;
1895 
1896 	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
1897 		ep->is_in ? "in" : "out", ep->ep.maxpacket);
1898 
1899 	spin_lock_irqsave(&ep->udc->lock, flags);
1900 	ep->disabled = 1;
1901 	/* Disable DMA interrupt */
1902 	if (ep->dma_regs) {
1903 		usbf_reg_bitclr(udc, USBF_REG_AHBBINTEN,
1904 			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
1905 		ep->bridge_on_dma_end = NULL;
1906 	}
1907 	/* disable interrupts for this endpoint */
1908 	usbf_reg_bitclr(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
1909 	/* and the endpoint itself */
1910 	ret = usbf_epn_disable(ep);
1911 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1912 
1913 	return ret;
1914 }
1915 
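/* Queue a request on ep0. Depending on the current control transfer phase,
 * the request is either started immediately (IN phases, when ep0 is not
 * already processing) or left on the queue to be handled when the matching
 * phase interrupt arrives.
 */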
1916 static int usbf_ep0_queue(struct usbf_ep *ep0, struct usbf_req *req,
1917 			  gfp_t gfp_flags)
1918 {
1919 	int ret;
1920 
1921 	req->req.actual = 0;
1922 	req->req.status = -EINPROGRESS;
1923 	req->is_zero_sent = 0;
1924 
1925 	list_add_tail(&req->queue, &ep0->queue);
1926 
1927 	if (ep0->udc->ep0state == EP0_IN_STATUS_START_PHASE)
1928 		return 0;
1929 
1930 	if (!ep0->is_in)
1931 		return 0;
1932 
1933 	if (ep0->udc->ep0state == EP0_IN_STATUS_PHASE) {
1934 		if (req->req.length) {
1935 			dev_err(ep0->udc->dev,
1936 				"request lng %u for ep0 in status phase\n",
1937 				req->req.length);
1938 			return -EINVAL;
1939 		}
1940 		ep0->delayed_status = 0;
1941 	}
1942 	if (!ep0->is_processing) {
1943 		ret = usbf_ep0_pio_in(ep0, req);
1944 		if (ret != -EINPROGRESS) {
1945 			dev_err(ep0->udc->dev,
1946 				"queued request not in progress\n");
1947 			/* The request cannot be completed (i.e.
1948 			 * ret == 0) on the first call.
1949 			 */
1950 			return ret ? ret : -EIO;
1951 		}
1952 	}
1953 
1954 	return 0;
1955 }
1956 
1957 static int usbf_epn_queue(struct usbf_ep *ep, struct usbf_req *req,
1958 			  gfp_t gfp_flags)
1959 {
1960 	int was_empty;
1961 	int ret;
1962 
1963 	if (ep->disabled) {
1964 		dev_err(ep->udc->dev, "ep%u request queued while disabled\n",
1965 			ep->id);
1966 		return -ESHUTDOWN;
1967 	}
1968 
1969 	req->req.actual = 0;
1970 	req->req.status = -EINPROGRESS;
1971 	req->is_zero_sent = 0;
1972 	req->xfer_step = USBF_XFER_START;
1973 
1974 	was_empty = list_empty(&ep->queue);
1975 	list_add_tail(&req->queue, &ep->queue);
1976 	if (was_empty) {
1977 		ret = usbf_epn_start_queue(ep);
1978 		if (ret)
1979 			return ret;
1980 	}
1981 	return 0;
1982 }
1983 
1984 static int usbf_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1985 			 gfp_t gfp_flags)
1986 {
1987 	struct usbf_req *req = container_of(_req, struct usbf_req, req);
1988 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
1989 	struct usbf_udc *udc = ep->udc;
1990 	unsigned long flags;
1991 	int ret;
1992 
1993 	if (!_req || !_req->buf)
1994 		return -EINVAL;
1995 
1996 	if (!udc || !udc->driver)
1997 		return -EINVAL;
1998 
1999 	dev_dbg(ep->udc->dev, "ep%u %s req queue length %u, zero %u, short_not_ok %u\n",
2000 		ep->id, ep->is_in ? "in" : "out",
2001 		req->req.length, req->req.zero, req->req.short_not_ok);
2002 
2003 	spin_lock_irqsave(&ep->udc->lock, flags);
2004 	if (ep->id == 0)
2005 		ret = usbf_ep0_queue(ep, req, gfp_flags);
2006 	else
2007 		ret = usbf_epn_queue(ep, req, gfp_flags);
2008 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2009 	return ret;
2010 }
2011 
2012 static int usbf_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
2013 {
2014 	struct usbf_req *req = container_of(_req, struct usbf_req, req);
2015 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2016 	unsigned long flags;
2017 	int is_processing;
2018 	int first;
2019 	int ret;
2020 
2021 	spin_lock_irqsave(&ep->udc->lock, flags);
2022 
2023 	dev_dbg(ep->udc->dev, "ep%u %s req dequeue length %u/%u\n",
2024 		ep->id, ep->is_in ? "in" : "out",
2025 		req->req.actual, req->req.length);
2026 
2027 	first = list_is_first(&req->queue, &ep->queue);
2028 
2029 	/* Complete the request, but avoid any operation that could be
2030 	 * triggered if a new request is queued during the completion callback.
2031 	 */
2032 	is_processing = ep->is_processing;
2033 	ep->is_processing = 1;
2034 	usbf_ep_req_done(ep, req, -ECONNRESET);
2035 	ep->is_processing = is_processing;
2036 
2037 	if (first) {
2038 		/* The first item in the list was dequeued.
2039 		 * This item may already have been submitted to the hardware,
2040 		 * so flush the FIFO.
2041 		 */
2042 		if (ep->id)
2043 			usbf_epn_fifo_flush(ep);
2044 		else
2045 			usbf_ep0_fifo_flush(ep);
2046 	}
2047 
2048 	if (ep->id == 0) {
2049 		/* We dequeue a request on ep0. On this endpoint, we can have
2050 		 * 1 request related to the data stage and/or 1 request
2051 		 * related to the status stage.
2052 		 * Dequeuing one of them leaves the USB control transaction in
2053 		 * an incoherent state. The simplest way to stay consistent
2054 		 * after dequeuing is to stall and nuke the endpoint and wait
2055 		 * for the next SETUP packet.
2056 		 */
2057 		usbf_ep_stall(ep, true);
2058 		usbf_ep_nuke(ep, -ECONNRESET);
2059 		ep->udc->ep0state = EP0_IDLE;
2060 		goto end;
2061 	}
2062 
2063 	if (!first)
2064 		goto end;
2065 
2066 	ret = usbf_epn_start_queue(ep);
2067 	if (ret) {
2068 		usbf_ep_stall(ep, true);
2069 		usbf_ep_nuke(ep, -EIO);
2070 	}
2071 end:
2072 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2073 	return 0;
2074 }
2075 
2076 static struct usb_request *usbf_ep_alloc_request(struct usb_ep *_ep,
2077 						 gfp_t gfp_flags)
2078 {
2079 	struct usbf_req *req;
2080 
2081 	if (!_ep)
2082 		return NULL;
2083 
2084 	req = kzalloc(sizeof(*req), gfp_flags);
2085 	if (!req)
2086 		return NULL;
2087 
2088 	INIT_LIST_HEAD(&req->queue);
2089 
2090 	return &req->req;
2091 }
2092 
2093 static void usbf_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
2094 {
2095 	struct usbf_req *req;
2096 	unsigned long flags;
2097 	struct usbf_ep *ep;
2098 
2099 	if (!_ep || !_req)
2100 		return;
2101 
2102 	req = container_of(_req, struct usbf_req, req);
2103 	ep = container_of(_ep, struct usbf_ep, ep);
2104 
2105 	spin_lock_irqsave(&ep->udc->lock, flags);
2106 	list_del_init(&req->queue);
2107 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2108 	kfree(req);
2109 }
2110 
2111 static int usbf_ep_set_halt(struct usb_ep *_ep, int halt)
2112 {
2113 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2114 	unsigned long flags;
2115 	int ret;
2116 
2117 	if (ep->id == 0)
2118 		return -EINVAL;
2119 
2120 	spin_lock_irqsave(&ep->udc->lock, flags);
2121 
2122 	if (!list_empty(&ep->queue)) {
2123 		ret = -EAGAIN;
2124 		goto end;
2125 	}
2126 
2127 	usbf_ep_stall(ep, halt);
2128 	if (!halt)
2129 		ep->is_wedged = 0;
2130 
2131 	ret = 0;
2132 end:
2133 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2134 
2135 	return ret;
2136 }
2137 
2138 static int usbf_ep_set_wedge(struct usb_ep *_ep)
2139 {
2140 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2141 	unsigned long flags;
2142 	int ret;
2143 
2144 	if (ep->id == 0)
2145 		return -EINVAL;
2146 
2147 	spin_lock_irqsave(&ep->udc->lock, flags);
2148 	if (!list_empty(&ep->queue)) {
2149 		ret = -EAGAIN;
2150 		goto end;
2151 	}
2152 	usbf_ep_stall(ep, 1);
2153 	ep->is_wedged = 1;
2154 
2155 	ret = 0;
2156 end:
2157 	spin_unlock_irqrestore(&ep->udc->lock, flags);
2158 	return ret;
2159 }
2160 
2161 static const struct usb_ep_ops usbf_ep_ops = {
2162 	.enable = usbf_ep_enable,
2163 	.disable = usbf_ep_disable,
2164 	.queue = usbf_ep_queue,
2165 	.dequeue = usbf_ep_dequeue,
2166 	.set_halt = usbf_ep_set_halt,
2167 	.set_wedge = usbf_ep_set_wedge,
2168 	.alloc_request = usbf_ep_alloc_request,
2169 	.free_request = usbf_ep_free_request,
2170 };
2171 
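/* Completion callback used for internally generated ep0 requests when the
 * caller does not provide its own: nothing to do here.
 */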
2172 static void usbf_ep0_req_complete(struct usb_ep *_ep, struct usb_request *_req)
2173 {
2174 }
2175 
2176 static void usbf_ep0_fill_req(struct usbf_ep *ep0, struct usbf_req *req,
2177 			      void *buf, unsigned int length,
2178 			      void (*complete)(struct usb_ep *_ep,
2179 					       struct usb_request *_req))
2180 {
2181 	if (buf && length)
2182 		memcpy(ep0->udc->ep0_buf, buf, length);
2183 
2184 	req->req.buf = ep0->udc->ep0_buf;
2185 	req->req.length = length;
2186 	req->req.dma = 0;
2187 	req->req.zero = true;
2188 	req->req.complete = complete ? complete : usbf_ep0_req_complete;
2189 	req->req.status = -EINPROGRESS;
2190 	req->req.context = NULL;
2191 	req->req.actual = 0;
2192 }
2193 
2194 static struct usbf_ep *usbf_get_ep_by_addr(struct usbf_udc *udc, u8 address)
2195 {
2196 	struct usbf_ep *ep;
2197 	unsigned int i;
2198 
2199 	if ((address & USB_ENDPOINT_NUMBER_MASK) == 0)
2200 		return &udc->ep[0];
2201 
2202 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
2203 		ep = &udc->ep[i];
2204 
2205 		if (!ep->ep.desc)
2206 			continue;
2207 
2208 		if (ep->ep.desc->bEndpointAddress == address)
2209 			return ep;
2210 	}
2211 
2212 	return NULL;
2213 }
2214 
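/* Hand a control request over to the gadget driver's setup() callback.
 * Called with udc->lock held; the lock is released across the callback.
 * A USB_GADGET_DELAYED_STATUS return is recorded so that the status stage
 * is deferred until the gadget driver queues its reply.
 */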
2215 static int usbf_req_delegate(struct usbf_udc *udc,
2216 			     const struct usb_ctrlrequest *ctrlrequest)
2217 {
2218 	int ret;
2219 
2220 	spin_unlock(&udc->lock);
2221 	ret = udc->driver->setup(&udc->gadget, ctrlrequest);
2222 	spin_lock(&udc->lock);
2223 	if (ret < 0) {
2224 		dev_dbg(udc->dev, "udc driver setup failed %d\n", ret);
2225 		return ret;
2226 	}
2227 	if (ret == USB_GADGET_DELAYED_STATUS) {
2228 		dev_dbg(udc->dev, "delayed status set\n");
2229 		udc->ep[0].delayed_status = 1;
2230 		return 0;
2231 	}
2232 	return ret;
2233 }
2234 
2235 static int usbf_req_get_status(struct usbf_udc *udc,
2236 			       const struct usb_ctrlrequest *ctrlrequest)
2237 {
2238 	struct usbf_ep *ep;
2239 	u16 status_data;
2240 	u16 wLength;
2241 	u16 wValue;
2242 	u16 wIndex;
2243 
2244 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2245 	wLength = le16_to_cpu(ctrlrequest->wLength);
2246 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2247 
2248 	switch (ctrlrequest->bRequestType) {
2249 	case USB_DIR_IN | USB_RECIP_DEVICE | USB_TYPE_STANDARD:
2250 		if ((wValue != 0) || (wIndex != 0) || (wLength != 2))
2251 			goto delegate;
2252 
2253 		status_data = 0;
2254 		if (udc->gadget.is_selfpowered)
2255 			status_data |= BIT(USB_DEVICE_SELF_POWERED);
2256 
2257 		if (udc->is_remote_wakeup)
2258 			status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
2259 
2260 		break;
2261 
2262 	case USB_DIR_IN | USB_RECIP_ENDPOINT | USB_TYPE_STANDARD:
2263 		if ((wValue != 0) || (wLength != 2))
2264 			goto delegate;
2265 
2266 		ep = usbf_get_ep_by_addr(udc, wIndex);
2267 		if (!ep)
2268 			return -EINVAL;
2269 
2270 		status_data = 0;
2271 		if (usbf_ep_is_stalled(ep))
2272 			status_data |= cpu_to_le16(1);
2273 		break;
2274 
2275 	case USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_STANDARD:
2276 		if ((wValue != 0) || (wLength != 2))
2277 			goto delegate;
2278 		status_data = 0;
2279 		break;
2280 
2281 	default:
2282 		goto delegate;
2283 	}
2284 
2285 	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, &status_data,
2286 			  sizeof(status_data), NULL);
2287 	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
2288 
2289 	return 0;
2290 
2291 delegate:
2292 	return usbf_req_delegate(udc, ctrlrequest);
2293 }
2294 
2295 static int usbf_req_clear_set_feature(struct usbf_udc *udc,
2296 				      const struct usb_ctrlrequest *ctrlrequest,
2297 				      bool is_set)
2298 {
2299 	struct usbf_ep *ep;
2300 	u16 wLength;
2301 	u16 wValue;
2302 	u16 wIndex;
2303 
2304 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2305 	wLength = le16_to_cpu(ctrlrequest->wLength);
2306 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2307 
2308 	switch (ctrlrequest->bRequestType) {
2309 	case USB_DIR_OUT | USB_RECIP_DEVICE:
2310 		if ((wIndex != 0) || (wLength != 0))
2311 			goto delegate;
2312 
2313 		if (wValue != cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
2314 			goto delegate;
2315 
2316 		udc->is_remote_wakeup = is_set;
2317 		break;
2318 
2319 	case USB_DIR_OUT | USB_RECIP_ENDPOINT:
2320 		if (wLength != 0)
2321 			goto delegate;
2322 
2323 		ep = usbf_get_ep_by_addr(udc, wIndex);
2324 		if (!ep)
2325 			return -EINVAL;
2326 
2327 		if ((ep->id == 0) && is_set) {
2328 			/* Endpoint 0 cannot be halted (stalled).
2329 			 * Returning an error code leads to a STALL on ep0 but
2330 			 * keeps the state machine in a consistent state.
2331 			 */
2332 			return -EINVAL;
2333 		}
2334 		if (ep->is_wedged && !is_set) {
2335 			/* Ignore CLEAR_FEATURE(HALT ENDPOINT) when the
2336 			 * endpoint is wedged
2337 			 */
2338 			break;
2339 		}
2340 		usbf_ep_stall(ep, is_set);
2341 		break;
2342 
2343 	default:
2344 		goto delegate;
2345 	}
2346 
2347 	return 0;
2348 
2349 delegate:
2350 	return usbf_req_delegate(udc, ctrlrequest);
2351 }
2352 
2353 static void usbf_ep0_req_set_address_complete(struct usb_ep *_ep,
2354 					      struct usb_request *_req)
2355 {
2356 	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
2357 
2358 	/* The status phase of the SET_ADDRESS request is completed ... */
2359 	if (_req->status == 0) {
2360 		/* ... without any errors -> Signal the new state to the core. */
2361 		usb_gadget_set_state(&ep->udc->gadget, USB_STATE_ADDRESS);
2362 	}
2363 
2364 	/* In case of request failure, there is no need to revert the address
2365 	 * value set to the hardware as the hardware will take care of the
2366 	 * value only if the status stage is completed normally.
2367 	 */
2368 }
2369 
2370 static int usbf_req_set_address(struct usbf_udc *udc,
2371 				const struct usb_ctrlrequest *ctrlrequest)
2372 {
2373 	u16 wLength;
2374 	u16 wValue;
2375 	u16 wIndex;
2376 	u32 addr;
2377 
2378 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2379 	wLength = le16_to_cpu(ctrlrequest->wLength);
2380 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2381 
2382 	if (ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
2383 		goto delegate;
2384 
2385 	if ((wIndex != 0) || (wLength != 0) || (wValue > 127))
2386 		return -EINVAL;
2387 
2388 	addr = wValue;
2389 	/* The hardware will take care of this USB address after the status
2390 	 * stage of the SET_ADDRESS request is completed normally.
2391 	 * It is safe to write it now
2392 	 */
2393 	usbf_reg_writel(udc, USBF_REG_USB_ADDRESS, USBF_USB_SET_USB_ADDR(addr));
2394 
2395 	/* Queue the status request */
2396 	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, NULL, 0,
2397 			  usbf_ep0_req_set_address_complete);
2398 	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
2399 
2400 	return 0;
2401 
2402 delegate:
2403 	return usbf_req_delegate(udc, ctrlrequest);
2404 }
2405 
2406 static int usbf_req_set_configuration(struct usbf_udc *udc,
2407 				      const struct usb_ctrlrequest *ctrlrequest)
2408 {
2409 	u16 wLength;
2410 	u16 wValue;
2411 	u16 wIndex;
2412 	int ret;
2413 
2414 	ret = usbf_req_delegate(udc, ctrlrequest);
2415 	if (ret)
2416 		return ret;
2417 
2418 	wValue  = le16_to_cpu(ctrlrequest->wValue);
2419 	wLength = le16_to_cpu(ctrlrequest->wLength);
2420 	wIndex  = le16_to_cpu(ctrlrequest->wIndex);
2421 
2422 	if ((ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) ||
2423 	    (wIndex != 0) || (wLength != 0)) {
2424 		/* No error detected by driver->setup() but it is not a USB 2.0
2425 		 * chapter 9 SET_CONFIGURATION.
2426 		 * Nothing more to do.
2427 		 */
2428 		return 0;
2429 	}
2430 
2431 	if (wValue & 0x00FF) {
2432 		usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
2433 	} else {
2434 		usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
2435 		/* Go back to Address State */
2436 		spin_unlock(&udc->lock);
2437 		usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
2438 		spin_lock(&udc->lock);
2439 	}
2440 
2441 	return 0;
2442 }
2443 
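/* Decode the SETUP packet latched in the SETUP_DATA registers, move the ep0
 * state machine to the matching data/status phase and handle the standard
 * chapter 9 requests that the UDC takes care of itself (GET_STATUS,
 * CLEAR_FEATURE, SET_FEATURE, SET_ADDRESS, SET_CONFIGURATION). Everything
 * else is delegated to the gadget driver.
 */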
2444 static int usbf_handle_ep0_setup(struct usbf_ep *ep0)
2445 {
2446 	union {
2447 		struct usb_ctrlrequest ctrlreq;
2448 		u32 raw[2];
2449 	} crq;
2450 	struct usbf_udc *udc = ep0->udc;
2451 	int ret;
2452 
2453 	/* Read setup data (i.e. the USB control request) */
2454 	crq.raw[0] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA0);
2455 	crq.raw[1] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA1);
2456 
2457 	dev_dbg(ep0->udc->dev,
2458 		"ep0 req%02x.%02x, wValue 0x%04x, wIndex 0x%04x, wLength 0x%04x\n",
2459 		crq.ctrlreq.bRequestType, crq.ctrlreq.bRequest,
2460 		crq.ctrlreq.wValue, crq.ctrlreq.wIndex, crq.ctrlreq.wLength);
2461 
2462 	/* Set current EP0 state according to the received request */
2463 	if (crq.ctrlreq.wLength) {
2464 		if (crq.ctrlreq.bRequestType & USB_DIR_IN) {
2465 			udc->ep0state = EP0_IN_DATA_PHASE;
2466 			usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2467 				USBF_EP0_INAK,
2468 				USBF_EP0_INAK_EN);
2469 			ep0->is_in = 1;
2470 		} else {
2471 			udc->ep0state = EP0_OUT_DATA_PHASE;
2472 			usbf_ep_reg_bitclr(ep0, USBF_REG_EP0_CONTROL,
2473 				USBF_EP0_ONAK);
2474 			ep0->is_in = 0;
2475 		}
2476 	} else {
2477 		udc->ep0state = EP0_IN_STATUS_START_PHASE;
2478 		ep0->is_in = 1;
2479 	}
2480 
2481 	/* We start a new control transfer -> clear the delayed status flag */
2482 	ep0->delayed_status = 0;
2483 
2484 	if ((crq.ctrlreq.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
2485 		/* This is not a USB standard request -> delegate */
2486 		goto delegate;
2487 	}
2488 
2489 	switch (crq.ctrlreq.bRequest) {
2490 	case USB_REQ_GET_STATUS:
2491 		ret = usbf_req_get_status(udc, &crq.ctrlreq);
2492 		break;
2493 
2494 	case USB_REQ_CLEAR_FEATURE:
2495 		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, false);
2496 		break;
2497 
2498 	case USB_REQ_SET_FEATURE:
2499 		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, true);
2500 		break;
2501 
2502 	case USB_REQ_SET_ADDRESS:
2503 		ret = usbf_req_set_address(udc, &crq.ctrlreq);
2504 		break;
2505 
2506 	case USB_REQ_SET_CONFIGURATION:
2507 		ret = usbf_req_set_configuration(udc, &crq.ctrlreq);
2508 		break;
2509 
2510 	default:
2511 		goto delegate;
2512 	}
2513 
2514 	return ret;
2515 
2516 delegate:
2517 	return usbf_req_delegate(udc, &crq.ctrlreq);
2518 }
2519 
2520 static int usbf_handle_ep0_data_status(struct usbf_ep *ep0,
2521 				  const char *ep0state_name,
2522 				  enum usbf_ep0state next_ep0state)
2523 {
2524 	struct usbf_udc *udc = ep0->udc;
2525 	int ret;
2526 
2527 	ret = usbf_ep_process_queue(ep0);
2528 	switch (ret) {
2529 	case -ENOENT:
2530 		dev_err(udc->dev,
2531 			"no request available for ep0 %s phase\n",
2532 			ep0state_name);
2533 		break;
2534 	case -EINPROGRESS:
2535 		/* More data needs to be processed */
2536 		ret = 0;
2537 		break;
2538 	case 0:
2539 		/* All requests in the queue are processed */
2540 		udc->ep0state = next_ep0state;
2541 		break;
2542 	default:
2543 		dev_err(udc->dev,
2544 			"process queue failed for ep0 %s phase (%d)\n",
2545 			ep0state_name, ret);
2546 		break;
2547 	}
2548 	return ret;
2549 }
2550 
2551 static int usbf_handle_ep0_out_status_start(struct usbf_ep *ep0)
2552 {
2553 	struct usbf_udc *udc = ep0->udc;
2554 	struct usbf_req *req;
2555 
2556 	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2557 				USBF_EP0_ONAK,
2558 				USBF_EP0_PIDCLR);
2559 	ep0->is_in = 0;
2560 
2561 	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2562 	if (!req) {
2563 		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
2564 		usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
2565 	} else {
2566 		if (req->req.length) {
2567 			dev_err(udc->dev,
2568 				"queued request length %u for ep0 out status phase\n",
2569 				req->req.length);
2570 		}
2571 	}
2572 	udc->ep0state = EP0_OUT_STATUS_PHASE;
2573 	return 0;
2574 }
2575 
2576 static int usbf_handle_ep0_in_status_start(struct usbf_ep *ep0)
2577 {
2578 	struct usbf_udc *udc = ep0->udc;
2579 	struct usbf_req *req;
2580 	int ret;
2581 
2582 	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
2583 				USBF_EP0_INAK,
2584 				USBF_EP0_INAK_EN | USBF_EP0_PIDCLR);
2585 	ep0->is_in = 1;
2586 
2587 	/* Queue request for status if needed */
2588 	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2589 	if (!req) {
2590 		if (ep0->delayed_status) {
2591 			dev_dbg(ep0->udc->dev,
2592 				"EP0_IN_STATUS_START_PHASE ep0->delayed_status set\n");
2593 			udc->ep0state = EP0_IN_STATUS_PHASE;
2594 			return 0;
2595 		}
2596 
2597 		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL,
2598 			  0, NULL);
2599 		usbf_ep0_queue(ep0, &udc->setup_reply,
2600 			       GFP_ATOMIC);
2601 
2602 		req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
2603 	} else {
2604 		if (req->req.length) {
2605 			dev_err(udc->dev,
2606 				"queued request length %u for ep0 in status phase\n",
2607 				req->req.length);
2608 		}
2609 	}
2610 
2611 	ret = usbf_ep0_pio_in(ep0, req);
2612 	if (ret != -EINPROGRESS) {
2613 		usbf_ep_req_done(ep0, req, ret);
2614 		udc->ep0state = EP0_IN_STATUS_END_PHASE;
2615 		return 0;
2616 	}
2617 
2618 	udc->ep0state = EP0_IN_STATUS_PHASE;
2619 	return 0;
2620 }
2621 
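/* ep0 interrupt handler. The pending status bits and the ep0 state machine
 * are iterated until neither the state nor the remaining bits change, so
 * that back-to-back stages signalled in a single interrupt are handled in
 * one pass. On error, ep0 is stalled and the state machine is reset to
 * idle; the stall is automatically cleared by the IP on the next SETUP
 * packet.
 */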
2622 static void usbf_ep0_interrupt(struct usbf_ep *ep0)
2623 {
2624 	struct usbf_udc *udc = ep0->udc;
2625 	u32 sts, prev_sts;
2626 	int prev_ep0state;
2627 	int ret;
2628 
2629 	ep0->status = usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS);
2630 	usbf_ep_reg_writel(ep0, USBF_REG_EP0_STATUS, ~ep0->status);
2631 
2632 	dev_dbg(ep0->udc->dev, "ep0 status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
2633 		ep0->status,
2634 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_INT_ENA),
2635 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL));
2636 
2637 	sts = ep0->status & (USBF_EP0_SETUP_INT | USBF_EP0_IN_INT | USBF_EP0_OUT_INT |
2638 			     USBF_EP0_OUT_NULL_INT | USBF_EP0_STG_START_INT |
2639 			     USBF_EP0_STG_END_INT);
2640 
2641 	ret = 0;
2642 	do {
2643 		dev_dbg(ep0->udc->dev, "udc->ep0state=%d\n", udc->ep0state);
2644 
2645 		prev_sts = sts;
2646 		prev_ep0state = udc->ep0state;
2647 		switch (udc->ep0state) {
2648 		case EP0_IDLE:
2649 			if (!(sts & USBF_EP0_SETUP_INT))
2650 				break;
2651 
2652 			sts &= ~USBF_EP0_SETUP_INT;
2653 			dev_dbg(ep0->udc->dev, "ep0 handle setup\n");
2654 			ret = usbf_handle_ep0_setup(ep0);
2655 			break;
2656 
2657 		case EP0_IN_DATA_PHASE:
2658 			if (!(sts & USBF_EP0_IN_INT))
2659 				break;
2660 
2661 			sts &= ~USBF_EP0_IN_INT;
2662 			dev_dbg(ep0->udc->dev, "ep0 handle in data phase\n");
2663 			ret = usbf_handle_ep0_data_status(ep0,
2664 				"in data", EP0_OUT_STATUS_START_PHASE);
2665 			break;
2666 
2667 		case EP0_OUT_STATUS_START_PHASE:
2668 			if (!(sts & USBF_EP0_STG_START_INT))
2669 				break;
2670 
2671 			sts &= ~USBF_EP0_STG_START_INT;
2672 			dev_dbg(ep0->udc->dev, "ep0 handle out status start phase\n");
2673 			ret = usbf_handle_ep0_out_status_start(ep0);
2674 			break;
2675 
2676 		case EP0_OUT_STATUS_PHASE:
2677 			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
2678 				break;
2679 
2680 			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
2681 			dev_dbg(ep0->udc->dev, "ep0 handle out status phase\n");
2682 			ret = usbf_handle_ep0_data_status(ep0,
2683 				"out status",
2684 				EP0_OUT_STATUS_END_PHASE);
2685 			break;
2686 
2687 		case EP0_OUT_STATUS_END_PHASE:
2688 			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
2689 				break;
2690 
2691 			sts &= ~USBF_EP0_STG_END_INT;
2692 			dev_dbg(ep0->udc->dev, "ep0 handle out status end phase\n");
2693 			udc->ep0state = EP0_IDLE;
2694 			break;
2695 
2696 		case EP0_OUT_DATA_PHASE:
2697 			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
2698 				break;
2699 
2700 			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
2701 			dev_dbg(ep0->udc->dev, "ep0 handle out data phase\n");
2702 			ret = usbf_handle_ep0_data_status(ep0,
2703 				"out data", EP0_IN_STATUS_START_PHASE);
2704 			break;
2705 
2706 		case EP0_IN_STATUS_START_PHASE:
2707 			if (!(sts & USBF_EP0_STG_START_INT))
2708 				break;
2709 
2710 			sts &= ~USBF_EP0_STG_START_INT;
2711 			dev_dbg(ep0->udc->dev, "ep0 handle in status start phase\n");
2712 			ret = usbf_handle_ep0_in_status_start(ep0);
2713 			break;
2714 
2715 		case EP0_IN_STATUS_PHASE:
2716 			if (!(sts & USBF_EP0_IN_INT))
2717 				break;
2718 
2719 			sts &= ~USBF_EP0_IN_INT;
2720 			dev_dbg(ep0->udc->dev, "ep0 handle in status phase\n");
2721 			ret = usbf_handle_ep0_data_status(ep0,
2722 				"in status", EP0_IN_STATUS_END_PHASE);
2723 			break;
2724 
2725 		case EP0_IN_STATUS_END_PHASE:
2726 			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
2727 				break;
2728 
2729 			sts &= ~USBF_EP0_STG_END_INT;
2730 			dev_dbg(ep0->udc->dev, "ep0 handle in status end\n");
2731 			udc->ep0state = EP0_IDLE;
2732 			break;
2733 
2734 		default:
2735 			udc->ep0state = EP0_IDLE;
2736 			break;
2737 		}
2738 
2739 		if (ret) {
2740 			dev_dbg(ep0->udc->dev, "ep0 failed (%d)\n", ret);
2741 			/* Failure -> stall.
2742 			 * This stall state will be automatically cleared when
2743 			 * the IP receives the next SETUP packet
2744 			 */
2745 			usbf_ep_stall(ep0, true);
2746 
2747 			/* Remove anything that was pending */
2748 			usbf_ep_nuke(ep0, -EPROTO);
2749 
2750 			udc->ep0state = EP0_IDLE;
2751 			break;
2752 		}
2753 
2754 	} while ((prev_ep0state != udc->ep0state) || (prev_sts != sts));
2755 
2756 	dev_dbg(ep0->udc->dev, "ep0 done udc->ep0state=%d, status=0x%08x. next=0x%08x\n",
2757 		udc->ep0state, sts,
2758 		usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS));
2759 }
2760 
2761 static void usbf_epn_process_queue(struct usbf_ep *epn)
2762 {
2763 	int ret;
2764 
2765 	ret = usbf_ep_process_queue(epn);
2766 	switch (ret) {
2767 	case -ENOENT:
2768 		dev_warn(epn->udc->dev, "ep%u %s, no request available\n",
2769 			epn->id, epn->is_in ? "in" : "out");
2770 		break;
2771 	case -EINPROGRESS:
2772 		/* More data needs to be processed */
2773 		ret = 0;
2774 		break;
2775 	case 0:
2776 		/* All requests in the queue are processed */
2777 		break;
2778 	default:
2779 		dev_err(epn->udc->dev, "ep%u %s, process queue failed (%d)\n",
2780 			epn->id, epn->is_in ? "in" : "out", ret);
2781 		break;
2782 	}
2783 
2784 	if (ret) {
2785 		dev_dbg(epn->udc->dev, "ep%u %s failed (%d)\n", epn->id,
2786 			epn->is_in ? "in" : "out", ret);
2787 		usbf_ep_stall(epn, true);
2788 		usbf_ep_nuke(epn, ret);
2789 	}
2790 }
2791 
2792 static void usbf_epn_interrupt(struct usbf_ep *epn)
2793 {
2794 	u32 sts;
2795 	u32 ena;
2796 
2797 	epn->status = usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS);
2798 	ena = usbf_ep_reg_readl(epn, USBF_REG_EPN_INT_ENA);
2799 	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(epn->status & ena));
2800 
2801 	dev_dbg(epn->udc->dev, "ep%u %s status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
2802 		epn->id, epn->is_in ? "in" : "out", epn->status, ena,
2803 		usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL));
2804 
2805 	if (epn->disabled) {
2806 		dev_warn(epn->udc->dev, "ep%u %s, interrupt while disabled\n",
2807 			epn->id, epn->is_in ? "in" : "out");
2808 		return;
2809 	}
2810 
2811 	sts = epn->status & ena;
2812 
2813 	if (sts & (USBF_EPN_IN_END_INT | USBF_EPN_IN_INT)) {
2814 		sts &= ~(USBF_EPN_IN_END_INT | USBF_EPN_IN_INT);
2815 		dev_dbg(epn->udc->dev, "ep%u %s process queue (in interrupts)\n",
2816 			epn->id, epn->is_in ? "in" : "out");
2817 		usbf_epn_process_queue(epn);
2818 	}
2819 
2820 	if (sts & (USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT)) {
2821 		sts &= ~(USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
2822 		dev_dbg(epn->udc->dev, "ep%u %s process queue (out interrupts)\n",
2823 			epn->id, epn->is_in ? "in" : "out");
2824 		usbf_epn_process_queue(epn);
2825 	}
2826 
2827 	dev_dbg(epn->udc->dev, "ep%u %s done status=0x%08x. next=0x%08x\n",
2828 		epn->id, epn->is_in ? "in" : "out",
2829 		sts, usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS));
2830 }
2831 
2832 static void usbf_ep_reset(struct usbf_ep *ep)
2833 {
2834 	ep->status = 0;
2835 	/* Remove anything that was pending */
2836 	usbf_ep_nuke(ep, -ESHUTDOWN);
2837 }
2838 
2839 static void usbf_reset(struct usbf_udc *udc)
2840 {
2841 	int i;
2842 
2843 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
2844 		if (udc->ep[i].disabled)
2845 			continue;
2846 
2847 		usbf_ep_reset(&udc->ep[i]);
2848 	}
2849 
2850 	if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
2851 		udc->gadget.speed = USB_SPEED_HIGH;
2852 	else
2853 		udc->gadget.speed = USB_SPEED_FULL;
2854 
2855 	/* Remote wakeup feature must be disabled on USB bus reset */
2856 	udc->is_remote_wakeup = false;
2857 
2858 	/* Enable endpoint zero */
2859 	usbf_ep0_enable(&udc->ep[0]);
2860 
2861 	if (udc->driver) {
2862 		/* Signal the reset */
2863 		spin_unlock(&udc->lock);
2864 		usb_gadget_udc_reset(&udc->gadget, udc->driver);
2865 		spin_lock(&udc->lock);
2866 	}
2867 }
2868 
2869 static void usbf_driver_suspend(struct usbf_udc *udc)
2870 {
2871 	if (udc->is_usb_suspended) {
2872 		dev_dbg(udc->dev, "already suspended\n");
2873 		return;
2874 	}
2875 
2876 	dev_dbg(udc->dev, "do usb suspend\n");
2877 	udc->is_usb_suspended = true;
2878 
2879 	if (udc->driver && udc->driver->suspend) {
2880 		spin_unlock(&udc->lock);
2881 		udc->driver->suspend(&udc->gadget);
2882 		spin_lock(&udc->lock);
2883 
2884 		/* The datasheet says to set the USB_CONTROL register SUSPEND
2885 		 * bit when a USB bus suspend is detected.
2886 		 * This bit stops the clocks (clocks for EPC, SIE, USBPHY) but
2887 		 * these clocks do not seem to be used only by the USB device.
2888 		 * Some UARTs can be lost ...
2889 		 * So, do not set the USB_CONTROL register SUSPEND bit.
2890 		 */
2891 	}
2892 }
2893 
2894 static void usbf_driver_resume(struct usbf_udc *udc)
2895 {
2896 	if (!udc->is_usb_suspended)
2897 		return;
2898 
2899 	dev_dbg(udc->dev, "do usb resume\n");
2900 	udc->is_usb_suspended = false;
2901 
2902 	if (udc->driver && udc->driver->resume) {
2903 		spin_unlock(&udc->lock);
2904 		udc->driver->resume(&udc->gadget);
2905 		spin_lock(&udc->lock);
2906 	}
2907 }
2908 
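/* Main EPC interrupt handler: processes bus events (resume, reset, speed
 * change, suspend) and dispatches per-endpoint interrupts to the ep0/epn
 * handlers. Reset and endpoint events resume the driver first as they imply
 * that the bus is no longer suspended.
 */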
2909 static irqreturn_t usbf_epc_irq(int irq, void *_udc)
2910 {
2911 	struct usbf_udc *udc = (struct usbf_udc *)_udc;
2912 	unsigned long flags;
2913 	struct usbf_ep *ep;
2914 	u32 int_sts;
2915 	u32 int_en;
2916 	int i;
2917 
2918 	spin_lock_irqsave(&udc->lock, flags);
2919 
2920 	int_en = usbf_reg_readl(udc, USBF_REG_USB_INT_ENA);
2921 	int_sts = usbf_reg_readl(udc, USBF_REG_USB_INT_STA) & int_en;
2922 	usbf_reg_writel(udc, USBF_REG_USB_INT_STA, ~int_sts);
2923 
2924 	dev_dbg(udc->dev, "int_sts=0x%08x\n", int_sts);
2925 
2926 	if (int_sts & USBF_USB_RSUM_INT) {
2927 		dev_dbg(udc->dev, "handle resume\n");
2928 		usbf_driver_resume(udc);
2929 	}
2930 
2931 	if (int_sts & USBF_USB_USB_RST_INT) {
2932 		dev_dbg(udc->dev, "handle bus reset\n");
2933 		usbf_driver_resume(udc);
2934 		usbf_reset(udc);
2935 	}
2936 
2937 	if (int_sts & USBF_USB_SPEED_MODE_INT) {
2938 		if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
2939 			udc->gadget.speed = USB_SPEED_HIGH;
2940 		else
2941 			udc->gadget.speed = USB_SPEED_FULL;
2942 		dev_dbg(udc->dev, "handle speed change (%s)\n",
2943 			udc->gadget.speed == USB_SPEED_HIGH ? "High" : "Full");
2944 	}
2945 
2946 	if (int_sts & USBF_USB_EPN_INT(0)) {
2947 		usbf_driver_resume(udc);
2948 		usbf_ep0_interrupt(&udc->ep[0]);
2949 	}
2950 
2951 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
2952 		ep = &udc->ep[i];
2953 
2954 		if (int_sts & USBF_USB_EPN_INT(i)) {
2955 			usbf_driver_resume(udc);
2956 			usbf_epn_interrupt(ep);
2957 		}
2958 	}
2959 
2960 	if (int_sts & USBF_USB_SPND_INT) {
2961 		dev_dbg(udc->dev, "handle suspend\n");
2962 		usbf_driver_suspend(udc);
2963 	}
2964 
2965 	spin_unlock_irqrestore(&udc->lock, flags);
2966 
2967 	return IRQ_HANDLED;
2968 }
2969 
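/* AHB bridge interrupt handler: reports VBUS level changes to the UDC core
 * and runs the per-endpoint DMA completion actions armed in
 * bridge_on_dma_end by the DMA transfer code.
 */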
2970 static irqreturn_t usbf_ahb_epc_irq(int irq, void *_udc)
2971 {
2972 	struct usbf_udc *udc = (struct usbf_udc *)_udc;
2973 	unsigned long flags;
2974 	struct usbf_ep *epn;
2975 	u32 sysbint;
2976 	void (*ep_action)(struct usbf_ep *epn);
2977 	int i;
2978 
2979 	spin_lock_irqsave(&udc->lock, flags);
2980 
2981 	/* Read and ack interrupts */
2982 	sysbint = usbf_reg_readl(udc, USBF_REG_AHBBINT);
2983 	usbf_reg_writel(udc, USBF_REG_AHBBINT, sysbint);
2984 
2985 	if ((sysbint & USBF_SYS_VBUS_INT) == USBF_SYS_VBUS_INT) {
2986 		if (usbf_reg_readl(udc, USBF_REG_EPCTR) & USBF_SYS_VBUS_LEVEL) {
2987 			dev_dbg(udc->dev, "handle vbus (1)\n");
2988 			spin_unlock(&udc->lock);
2989 			usb_udc_vbus_handler(&udc->gadget, true);
2990 			usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
2991 			spin_lock(&udc->lock);
2992 		} else {
2993 			dev_dbg(udc->dev, "handle vbus (0)\n");
2994 			udc->is_usb_suspended = false;
2995 			spin_unlock(&udc->lock);
2996 			usb_udc_vbus_handler(&udc->gadget, false);
2997 			usb_gadget_set_state(&udc->gadget,
2998 					     USB_STATE_NOTATTACHED);
2999 			spin_lock(&udc->lock);
3000 		}
3001 	}
3002 
3003 	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
3004 		if (sysbint & USBF_SYS_DMA_ENDINT_EPN(i)) {
3005 			epn = &udc->ep[i];
3006 			dev_dbg(epn->udc->dev,
3007 				"ep%u handle DMA complete. action=%ps\n",
3008 				epn->id, epn->bridge_on_dma_end);
3009 			ep_action = epn->bridge_on_dma_end;
3010 			if (ep_action) {
3011 				epn->bridge_on_dma_end = NULL;
3012 				ep_action(epn);
3013 			}
3014 		}
3015 	}
3016 
3017 	spin_unlock_irqrestore(&udc->lock, flags);
3018 
3019 	return IRQ_HANDLED;
3020 }
3021 
3022 static int usbf_udc_start(struct usb_gadget *gadget,
3023 			  struct usb_gadget_driver *driver)
3024 {
3025 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3026 	unsigned long flags;
3027 
3028 	dev_info(udc->dev, "start (driver '%s')\n", driver->driver.name);
3029 
3030 	spin_lock_irqsave(&udc->lock, flags);
3031 
3032 	/* hook up the driver */
3033 	udc->driver = driver;
3034 
3035 	/* Enable VBUS interrupt */
3036 	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, USBF_SYS_VBUS_INTEN);
3037 
3038 	spin_unlock_irqrestore(&udc->lock, flags);
3039 
3040 	return 0;
3041 }
3042 
3043 static int usbf_udc_stop(struct usb_gadget *gadget)
3044 {
3045 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3046 	unsigned long flags;
3047 
3048 	spin_lock_irqsave(&udc->lock, flags);
3049 
3050 	/* Disable VBUS interrupt */
3051 	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, 0);
3052 
3053 	udc->driver = NULL;
3054 
3055 	spin_unlock_irqrestore(&udc->lock, flags);
3056 
3057 	dev_info(udc->dev, "stopped\n");
3058 
3059 	return 0;
3060 }
3061 
3062 static int usbf_get_frame(struct usb_gadget *gadget)
3063 {
3064 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3065 
3066 	return USBF_USB_GET_FRAME(usbf_reg_readl(udc, USBF_REG_USB_ADDRESS));
3067 }
3068 
3069 static void usbf_attach(struct usbf_udc *udc)
3070 {
3071 	/* Enable the USB signal to the Function PHY
3072 	 * Pull up the D+ signal
3073 	 * Disable endpoint 0; it will be automatically enabled when a USB
3074 	 * reset is received.
3075 	 * Disable the other endpoints
3076 	 */
3077 	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
3078 		USBF_USB_CONNECTB | USBF_USB_DEFAULT | USBF_USB_CONF,
3079 		USBF_USB_PUE2);
3080 
3081 	/* Enable reset and mode change interrupts */
3082 	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA,
3083 		USBF_USB_USB_RST_EN | USBF_USB_SPEED_MODE_EN | USBF_USB_RSUM_EN | USBF_USB_SPND_EN);
3084 }
3085 
3086 static void usbf_detach(struct usbf_udc *udc)
3087 {
3088 	int i;
3089 
3090 	/* Disable interrupts */
3091 	usbf_reg_writel(udc, USBF_REG_USB_INT_ENA, 0);
3092 
3093 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
3094 		if (udc->ep[i].disabled)
3095 			continue;
3096 
3097 		usbf_ep_reset(&udc->ep[i]);
3098 	}
3099 
3100 	/* Disable the USB signal to the Function PHY
3101 	 * Do not pull up the D+ signal
3102 	 * Disable endpoint 0
3103 	 * Disable the other endpoints
3104 	 */
3105 	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
3106 		USBF_USB_PUE2 | USBF_USB_DEFAULT | USBF_USB_CONF,
3107 		USBF_USB_CONNECTB);
3108 }
3109 
3110 static int usbf_pullup(struct usb_gadget *gadget, int is_on)
3111 {
3112 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3113 	unsigned long flags;
3114 
3115 	dev_dbg(udc->dev, "pullup %d\n", is_on);
3116 
3117 	spin_lock_irqsave(&udc->lock, flags);
3118 	if (is_on)
3119 		usbf_attach(udc);
3120 	else
3121 		usbf_detach(udc);
3122 	spin_unlock_irqrestore(&udc->lock, flags);
3123 
3124 	return 0;
3125 }
3126 
3127 static int usbf_udc_set_selfpowered(struct usb_gadget *gadget,
3128 				    int is_selfpowered)
3129 {
3130 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3131 	unsigned long flags;
3132 
3133 	spin_lock_irqsave(&udc->lock, flags);
3134 	gadget->is_selfpowered = (is_selfpowered != 0);
3135 	spin_unlock_irqrestore(&udc->lock, flags);
3136 
3137 	return 0;
3138 }
3139 
3140 static int usbf_udc_wakeup(struct usb_gadget *gadget)
3141 {
3142 	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
3143 	unsigned long flags;
3144 	int ret;
3145 
3146 	spin_lock_irqsave(&udc->lock, flags);
3147 
3148 	if (!udc->is_remote_wakeup) {
3149 		dev_dbg(udc->dev, "remote wakeup not allowed\n");
3150 		ret = -EINVAL;
3151 		goto end;
3152 	}
3153 
3154 	dev_dbg(udc->dev, "do wakeup\n");
3155 
3156 	/* Send the resume signal */
3157 	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
3158 	usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
3159 
3160 	ret = 0;
3161 end:
3162 	spin_unlock_irqrestore(&udc->lock, flags);
3163 	return ret;
3164 }
3165 
3166 static const struct usb_gadget_ops usbf_gadget_ops = {
3167 	.get_frame = usbf_get_frame,
3168 	.pullup = usbf_pullup,
3169 	.udc_start = usbf_udc_start,
3170 	.udc_stop = usbf_udc_stop,
3171 	.set_selfpowered = usbf_udc_set_selfpowered,
3172 	.wakeup = usbf_udc_wakeup,
3173 };
3174 
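/* Check that the hardware configuration of a non-zero endpoint (transfer
 * type and single/double buffering) matches the capabilities declared in
 * the usbf_ep_info[] table.
 */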
3175 static int usbf_epn_check(struct usbf_ep *epn)
3176 {
3177 	const char *type_txt;
3178 	const char *buf_txt;
3179 	int ret = 0;
3180 	u32 ctrl;
3181 
3182 	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
3183 
3184 	switch (ctrl & USBF_EPN_MODE_MASK) {
3185 	case USBF_EPN_MODE_BULK:
3186 		type_txt = "bulk";
3187 		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
3188 		    !epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
3189 			dev_err(epn->udc->dev,
3190 				"ep%u caps mismatch, bulk expected\n", epn->id);
3191 			ret = -EINVAL;
3192 		}
3193 		break;
3194 	case USBF_EPN_MODE_INTR:
3195 		type_txt = "intr";
3196 		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
3197 		    epn->ep.caps.type_bulk || !epn->ep.caps.type_int) {
3198 			dev_err(epn->udc->dev,
3199 				"ep%u caps mismatch, int expected\n", epn->id);
3200 			ret = -EINVAL;
3201 		}
3202 		break;
3203 	case USBF_EPN_MODE_ISO:
3204 		type_txt = "iso";
3205 		if (epn->ep.caps.type_control || !epn->ep.caps.type_iso ||
3206 		    epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
3207 			dev_err(epn->udc->dev,
3208 				"ep%u caps mismatch, iso expected\n", epn->id);
3209 			ret = -EINVAL;
3210 		}
3211 		break;
3212 	default:
3213 		type_txt = "unknown";
3214 		dev_err(epn->udc->dev, "ep%u unknown type\n", epn->id);
3215 		ret = -EINVAL;
3216 		break;
3217 	}
3218 
3219 	if (ctrl & USBF_EPN_BUF_TYPE_DOUBLE) {
3220 		buf_txt = "double";
3221 		if (!usbf_ep_info[epn->id].is_double) {
3222 			dev_err(epn->udc->dev,
3223 				"ep%u buffer mismatch, double expected\n",
3224 				epn->id);
3225 			ret = -EINVAL;
3226 		}
3227 	} else {
3228 		buf_txt = "single";
3229 		if (usbf_ep_info[epn->id].is_double) {
3230 			dev_err(epn->udc->dev,
3231 				"ep%u buffer mismatch, single expected\n",
3232 				epn->id);
3233 			ret = -EINVAL;
3234 		}
3235 	}
3236 
3237 	dev_dbg(epn->udc->dev, "ep%u (%s) %s, %s buffer %u, checked %s\n",
3238 		 epn->id, epn->ep.name, type_txt, buf_txt,
3239 		 epn->ep.maxpacket_limit, ret ? "failed" : "ok");
3240 
3241 	return ret;
3242 }
3243 
3244 static int usbf_probe(struct platform_device *pdev)
3245 {
3246 	struct device *dev = &pdev->dev;
3247 	struct usbf_udc *udc;
3248 	struct usbf_ep *ep;
3249 	unsigned int i;
3250 	int irq;
3251 	int ret;
3252 
3253 	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
3254 	if (!udc)
3255 		return -ENOMEM;
3256 	platform_set_drvdata(pdev, udc);
3257 
3258 	udc->dev = dev;
3259 	spin_lock_init(&udc->lock);
3260 
3261 	udc->regs = devm_platform_ioremap_resource(pdev, 0);
3262 	if (IS_ERR(udc->regs))
3263 		return PTR_ERR(udc->regs);
3264 
3265 	devm_pm_runtime_enable(&pdev->dev);
3266 	ret = pm_runtime_resume_and_get(&pdev->dev);
3267 	if (ret < 0)
3268 		return ret;
3269 
3270 	dev_info(dev, "USBF version: %08x\n",
3271 		usbf_reg_readl(udc, USBF_REG_USBSSVER));
3272 
3273 	/* Resetting the PLL is handled via the clock driver as it shares
3274 	 * registers with the USB host.
3275 	 */
3276 	usbf_reg_bitclr(udc, USBF_REG_EPCTR, USBF_SYS_EPC_RST);
3277 
3278 	/* Default speed; the actual speed is updated at run time (USB reset / speed change) */
3279 	udc->gadget.speed = USB_SPEED_FULL;
3280 	udc->gadget.max_speed = USB_SPEED_HIGH;
3281 	udc->gadget.ops = &usbf_gadget_ops;
3282 
3283 	udc->gadget.name = dev->driver->name;
3284 	udc->gadget.dev.parent = dev;
3285 	udc->gadget.ep0 = &udc->ep[0].ep;
3286 
3287 	/* The hardware DMA controller needs DMA addresses aligned on 32 bits.
3288 	 * A fallback to PIO is done if a DMA address is not aligned.
3289 	 */
3290 	udc->gadget.quirk_avoids_skb_reserve = 1;
3291 
3292 	INIT_LIST_HEAD(&udc->gadget.ep_list);
3293 	/* We have a canned request structure to allow sending packets as a
3294 	 * reply to get_status requests.
3295 	 */
3296 	INIT_LIST_HEAD(&udc->setup_reply.queue);
3297 
3298 	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
3299 		ep = &udc->ep[i];
3300 
3301 		if (!(usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
3302 		      USBF_SYS_EP_AVAILABLE(i))) {
3303 			continue;
3304 		}
3305 
3306 		INIT_LIST_HEAD(&ep->queue);
3307 
3308 		ep->id = i;
3309 		ep->disabled = 1;
3310 		ep->udc = udc;
3311 		ep->ep.ops = &usbf_ep_ops;
3312 		ep->ep.name = usbf_ep_info[i].name;
3313 		ep->ep.caps = usbf_ep_info[i].caps;
3314 		usb_ep_set_maxpacket_limit(&ep->ep,
3315 					   usbf_ep_info[i].maxpacket_limit);
3316 
3317 		if (ep->id == 0) {
3318 			ep->regs = ep->udc->regs + USBF_BASE_EP0;
3319 		} else {
3320 			ep->regs = ep->udc->regs + USBF_BASE_EPN(ep->id - 1);
3321 			ret = usbf_epn_check(ep);
3322 			if (ret)
3323 				return ret;
3324 			if (usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
3325 			    USBF_SYS_DMA_AVAILABLE(i)) {
3326 				ep->dma_regs = ep->udc->regs +
3327 					       USBF_BASE_DMA_EPN(ep->id - 1);
3328 			}
3329 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
3330 		}
3331 	}
3332 
3333 	irq = platform_get_irq(pdev, 0);
3334 	if (irq < 0)
3335 		return irq;
3336 	ret = devm_request_irq(dev, irq, usbf_epc_irq, 0, "usbf-epc", udc);
3337 	if (ret) {
3338 		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
3339 		return ret;
3340 	}
3341 
3342 	irq = platform_get_irq(pdev, 1);
3343 	if (irq < 0)
3344 		return irq;
3345 	ret = devm_request_irq(dev, irq, usbf_ahb_epc_irq, 0, "usbf-ahb-epc", udc);
3346 	if (ret) {
3347 		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
3348 		return ret;
3349 	}
3350 
3351 	usbf_reg_bitset(udc, USBF_REG_AHBMCTR, USBF_SYS_WBURST_TYPE);
3352 
3353 	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL,
3354 		USBF_USB_INT_SEL | USBF_USB_SOF_RCV | USBF_USB_SOF_CLK_MODE);
3355 
3356 	ret = usb_add_gadget_udc(dev, &udc->gadget);
3357 	if (ret)
3358 		return ret;
3359 
3360 	return 0;
3361 }
3362 
3363 static void usbf_remove(struct platform_device *pdev)
3364 {
3365 	struct usbf_udc *udc = platform_get_drvdata(pdev);
3366 
3367 	usb_del_gadget_udc(&udc->gadget);
3368 
3369 	pm_runtime_put(&pdev->dev);
3370 }
3371 
3372 static const struct of_device_id usbf_match[] = {
3373 	{ .compatible = "renesas,rzn1-usbf" },
3374 	{} /* sentinel */
3375 };
3376 MODULE_DEVICE_TABLE(of, usbf_match);
3377 
3378 static struct platform_driver udc_driver = {
3379 	.driver = {
3380 		.name = "usbf_renesas",
3381 		.of_match_table = usbf_match,
3382 	},
3383 	.probe          = usbf_probe,
3384 	.remove_new     = usbf_remove,
3385 };
3386 
3387 module_platform_driver(udc_driver);
3388 
3389 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
3390 MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 USB Function driver");
3391 MODULE_LICENSE("GPL");
3392