xref: /openbmc/u-boot/drivers/usb/dwc3/gadget.c (revision d6736689)
1 // SPDX-License-Identifier: GPL-2.0
2 /**
3  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
4  *
5  * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
6  *
7  * Authors: Felipe Balbi <balbi@ti.com>,
8  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9  *
10  * Taken from Linux Kernel v3.19-rc1 (drivers/usb/dwc3/gadget.c) and ported
11  * to U-Boot.
12  *
13  * commit 8e74475b0e : usb: dwc3: gadget: use udc-core's reset notifier
14  */
15 
16 #include <common.h>
17 #include <malloc.h>
18 #include <asm/dma-mapping.h>
19 #include <usb/lin_gadget_compat.h>
20 #include <linux/bug.h>
21 #include <linux/list.h>
22 
23 #include <linux/usb/ch9.h>
24 #include <linux/usb/gadget.h>
25 
26 #include "core.h"
27 #include "gadget.h"
28 #include "io.h"
29 
30 #include "linux-compat.h"
31 
32 /**
33  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
34  * @dwc: pointer to our context structure
35  * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
36  *
37  * Caller should take care of locking. This function will
38  * return 0 on success or -EINVAL if wrong Test Selector
39  * is passed
40  */
41 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
42 {
43 	u32		reg;
44 
45 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
46 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
47 
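	/*
	 * The test mode selector field is the set of bits cleared by
	 * DWC3_DCTL_TSTCTRL_MASK above and starts at bit 1 of DCTL,
	 * hence the standard USB2 test selector is shifted left by one.
	 */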
48 	switch (mode) {
49 	case TEST_J:
50 	case TEST_K:
51 	case TEST_SE0_NAK:
52 	case TEST_PACKET:
53 	case TEST_FORCE_EN:
54 		reg |= mode << 1;
55 		break;
56 	default:
57 		return -EINVAL;
58 	}
59 
60 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
61 
62 	return 0;
63 }
64 
65 /**
66  * dwc3_gadget_get_link_state - Gets current state of USB Link
67  * @dwc: pointer to our context structure
68  *
69  * Caller should take care of locking. This function
70  * returns the current link state read from DSTS.
71  */
72 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
73 {
74 	u32		reg;
75 
76 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
77 
78 	return DWC3_DSTS_USBLNKST(reg);
79 }
80 
81 /**
82  * dwc3_gadget_set_link_state - Sets USB Link to a particular State
83  * @dwc: pointer to our context structure
84  * @state: the state to put link into
85  *
86  * Caller should take care of locking. This function will
87  * return 0 on success or -ETIMEDOUT.
88  */
89 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
90 {
91 	int		retries = 10000;
92 	u32		reg;
93 
94 	/*
95 	 * Wait until device controller is ready. Only applies to 1.94a and
96 	 * later RTL.
97 	 */
98 	if (dwc->revision >= DWC3_REVISION_194A) {
99 		while (--retries) {
100 			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
101 			if (reg & DWC3_DSTS_DCNRD)
102 				udelay(5);
103 			else
104 				break;
105 		}
106 
107 		if (retries <= 0)
108 			return -ETIMEDOUT;
109 	}
110 
111 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
112 	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
113 
114 	/* set requested state */
115 	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
116 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
117 
118 	/*
119 	 * The following code is racy when called from dwc3_gadget_wakeup,
120 	 * and is not needed, at least on newer versions
121 	 */
122 	if (dwc->revision >= DWC3_REVISION_194A)
123 		return 0;
124 
125 	/* wait for a change in DSTS */
126 	retries = 10000;
127 	while (--retries) {
128 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
129 
130 		if (DWC3_DSTS_USBLNKST(reg) == state)
131 			return 0;
132 
133 		udelay(5);
134 	}
135 
136 	dev_vdbg(dwc->dev, "link state change request timed out\n");
137 
138 	return -ETIMEDOUT;
139 }
140 
141 /**
142  * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
143  * @dwc: pointer to our context structure
144  *
145  * This function will do a best-effort FIFO allocation in order
146  * to improve FIFO usage and throughput, while still allowing
147  * us to enable as many endpoints as possible.
148  *
149  * Keep in mind that this operation will be highly dependent
150  * on the configured size for RAM1 (which contains the TxFIFO),
151  * the number of endpoints enabled in the coreConsultant tool, and
152  * the width of the Master Bus.
153  *
154  * In the ideal world, we would always be able to satisfy the
155  * following equation:
156  *
157  * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
158  * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
159  *
160  * Unfortunately, due to many variables that's not always the case.
161  */
162 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
163 {
164 	int		last_fifo_depth = 0;
165 	int		fifo_size;
166 	int		mdwidth;
167 	int		num;
168 
169 	if (!dwc->needs_fifo_resize)
170 		return 0;
171 
172 	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
173 
174 	/* MDWIDTH is represented in bits, we need it in bytes */
175 	mdwidth >>= 3;
176 
177 	/*
178 	 * FIXME For now we will only allocate 1 wMaxPacketSize space
179 	 * for each enabled endpoint, later patches will come to
180 	 * improve this algorithm so that we better use the internal
181 	 * FIFO space
182 	 */
183 	for (num = 0; num < dwc->num_in_eps; num++) {
184 		/* bit0 indicates direction; 1 means IN ep */
185 		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
186 		int		mult = 1;
187 		int		tmp;
188 
189 		if (!(dep->flags & DWC3_EP_ENABLED))
190 			continue;
191 
192 		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
193 				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
194 			mult = 3;
195 
196 		/*
197 		 * REVISIT: the following assumes we will always have enough
198 		 * space available on the FIFO RAM for all possible use cases.
199 		 * Make sure that's true somehow and change FIFO allocation
200 		 * accordingly.
201 		 *
202 		 * If we have Bulk or Isochronous endpoints, we want
203 		 * them to be able to be very, very fast. So we're giving
204 		 * those endpoints a fifo_size which is enough for 3 full
205 		 * packets
206 		 */
207 		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
208 		tmp += mdwidth;
209 
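		/*
		 * Convert the byte count to MDWIDTH-sized RAM words:
		 * GTXFIFOSIZ takes the FIFO depth in its lower half and the
		 * FIFO starting address (accumulated in last_fifo_depth) in
		 * bits 31:16.
		 */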
210 		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
211 
212 		fifo_size |= (last_fifo_depth << 16);
213 
214 		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
215 				dep->name, last_fifo_depth, fifo_size & 0xffff);
216 
217 		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
218 
219 		last_fifo_depth += (fifo_size & 0xffff);
220 	}
221 
222 	return 0;
223 }
224 
225 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
226 		int status)
227 {
228 	struct dwc3			*dwc = dep->dwc;
229 
230 	if (req->queued) {
231 		dep->busy_slot++;
232 		/*
233 		 * Skip LINK TRB. We can't use req->trb and check for
234 		 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
235 		 * just completed (not the LINK TRB).
236 		 */
237 		if (((dep->busy_slot & DWC3_TRB_MASK) ==
238 			DWC3_TRB_NUM - 1) &&
239 			usb_endpoint_xfer_isoc(dep->endpoint.desc))
240 			dep->busy_slot++;
241 		req->queued = false;
242 	}
243 
244 	list_del(&req->list);
245 	req->trb = NULL;
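	/*
	 * Make sure the CPU's view of the transfer buffer is coherent with
	 * whatever the controller DMA'd into it before the request is
	 * handed back to the gadget driver.
	 */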
246 	dwc3_flush_cache((uintptr_t)req->request.dma, req->request.length);
247 
248 	if (req->request.status == -EINPROGRESS)
249 		req->request.status = status;
250 
251 	if (dwc->ep0_bounced && dep->number == 0)
252 		dwc->ep0_bounced = false;
253 	else
254 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
255 				req->direction);
256 
257 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
258 			req, dep->name, req->request.actual,
259 			req->request.length, status);
260 
261 	spin_unlock(&dwc->lock);
262 	usb_gadget_giveback_request(&dep->endpoint, &req->request);
263 	spin_lock(&dwc->lock);
264 }
265 
266 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
267 {
268 	u32		timeout = 500;
269 	u32		reg;
270 
271 	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
272 	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
273 
274 	do {
275 		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
276 		if (!(reg & DWC3_DGCMD_CMDACT)) {
277 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
278 					DWC3_DGCMD_STATUS(reg));
279 			return 0;
280 		}
281 
282 		/*
283 		 * We can't sleep here, because it's also called from
284 		 * interrupt context.
285 		 */
286 		timeout--;
287 		if (!timeout)
288 			return -ETIMEDOUT;
289 		udelay(1);
290 	} while (1);
291 }
292 
293 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
294 		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
295 {
296 	u32			timeout = 500;
297 	u32			reg;
298 
299 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
300 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
301 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
302 
303 	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
304 	do {
305 		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
306 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
307 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
308 					DWC3_DEPCMD_STATUS(reg));
309 			return 0;
310 		}
311 
312 		/*
313 		 * We can't sleep here, because it is also called from
314 		 * interrupt context.
315 		 */
316 		timeout--;
317 		if (!timeout)
318 			return -ETIMEDOUT;
319 
320 		udelay(1);
321 	} while (1);
322 }
323 
324 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
325 		struct dwc3_trb *trb)
326 {
327 	u32		offset = (char *) trb - (char *) dep->trb_pool;
328 
329 	return dep->trb_pool_dma + offset;
330 }
331 
332 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
333 {
334 	if (dep->trb_pool)
335 		return 0;
336 
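	/*
	 * Physical endpoints 0 and 1 together form the control endpoint and
	 * are handled by the dedicated ep0 code, so no TRB pool is
	 * allocated for them here.
	 */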
337 	if (dep->number == 0 || dep->number == 1)
338 		return 0;
339 
340 	dep->trb_pool = dma_alloc_coherent(sizeof(struct dwc3_trb) *
341 					   DWC3_TRB_NUM,
342 					   (unsigned long *)&dep->trb_pool_dma);
343 	if (!dep->trb_pool) {
344 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
345 				dep->name);
346 		return -ENOMEM;
347 	}
348 
349 	return 0;
350 }
351 
352 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
353 {
354 	dma_free_coherent(dep->trb_pool);
355 
356 	dep->trb_pool = NULL;
357 	dep->trb_pool_dma = 0;
358 }
359 
360 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
361 {
362 	struct dwc3_gadget_ep_cmd_params params;
363 	u32			cmd;
364 
365 	memset(&params, 0x00, sizeof(params));
366 
367 	if (dep->number != 1) {
368 		cmd = DWC3_DEPCMD_DEPSTARTCFG;
369 		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
370 		if (dep->number > 1) {
371 			if (dwc->start_config_issued)
372 				return 0;
373 			dwc->start_config_issued = true;
374 			cmd |= DWC3_DEPCMD_PARAM(2);
375 		}
376 
377 		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
378 	}
379 
380 	return 0;
381 }
382 
383 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
384 		const struct usb_endpoint_descriptor *desc,
385 		const struct usb_ss_ep_comp_descriptor *comp_desc,
386 		bool ignore, bool restore)
387 {
388 	struct dwc3_gadget_ep_cmd_params params;
389 
390 	memset(&params, 0x00, sizeof(params));
391 
392 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
393 		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
394 
395 	/* Burst size is only needed in SuperSpeed mode */
396 	if (dwc->gadget.speed == USB_SPEED_SUPER) {
397 		u32 burst = dep->endpoint.maxburst - 1;
398 
399 		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
400 	}
401 
402 	if (ignore)
403 		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
404 
405 	if (restore) {
406 		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
407 		params.param2 |= dep->saved_state;
408 	}
409 
410 	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
411 		| DWC3_DEPCFG_XFER_NOT_READY_EN;
412 
413 	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
414 		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
415 			| DWC3_DEPCFG_STREAM_EVENT_EN;
416 		dep->stream_capable = true;
417 	}
418 
419 	if (!usb_endpoint_xfer_control(desc))
420 		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
421 
422 	/*
423 	 * We are doing 1:1 mapping for endpoints, meaning
424 	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
425 	 * so on. We consider the direction bit as part of the physical
426 	 * endpoint number. So USB endpoint 0x81 is 0x03.
427 	 */
428 	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
429 
430 	/*
431 	 * We must use the lower 16 TX FIFOs even though
432 	 * HW might have more
433 	 */
434 	if (dep->direction)
435 		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
436 
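	/*
	 * For periodic endpoints bInterval is an exponent: the service
	 * interval is 2^(bInterval - 1) (micro)frames, which matches the
	 * "minus one" encoding of the BINTERVAL_M1 field programmed below.
	 */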
437 	if (desc->bInterval) {
438 		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
439 		dep->interval = 1 << (desc->bInterval - 1);
440 	}
441 
442 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
443 			DWC3_DEPCMD_SETEPCONFIG, &params);
444 }
445 
446 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
447 {
448 	struct dwc3_gadget_ep_cmd_params params;
449 
450 	memset(&params, 0x00, sizeof(params));
451 
452 	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
453 
454 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
455 			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
456 }
457 
458 /**
459  * __dwc3_gadget_ep_enable - Initializes a HW endpoint
460  * @dep: endpoint to be initialized
461  * @desc: USB Endpoint Descriptor
462  *
463  * Caller should take care of locking
464  */
465 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
466 		const struct usb_endpoint_descriptor *desc,
467 		const struct usb_ss_ep_comp_descriptor *comp_desc,
468 		bool ignore, bool restore)
469 {
470 	struct dwc3		*dwc = dep->dwc;
471 	u32			reg;
472 	int			ret;
473 
474 	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
475 
476 	if (!(dep->flags & DWC3_EP_ENABLED)) {
477 		ret = dwc3_gadget_start_config(dwc, dep);
478 		if (ret)
479 			return ret;
480 	}
481 
482 	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
483 			restore);
484 	if (ret)
485 		return ret;
486 
487 	if (!(dep->flags & DWC3_EP_ENABLED)) {
488 		struct dwc3_trb	*trb_st_hw;
489 		struct dwc3_trb	*trb_link;
490 
491 		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
492 		if (ret)
493 			return ret;
494 
495 		dep->endpoint.desc = desc;
496 		dep->comp_desc = comp_desc;
497 		dep->type = usb_endpoint_type(desc);
498 		dep->flags |= DWC3_EP_ENABLED;
499 
500 		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
501 		reg |= DWC3_DALEPENA_EP(dep->number);
502 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
503 
504 		if (!usb_endpoint_xfer_isoc(desc))
505 			return 0;
506 
507 		/* Link TRB for ISOC. The HWO bit is never reset */
508 		trb_st_hw = &dep->trb_pool[0];
509 
510 		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
511 		memset(trb_link, 0, sizeof(*trb_link));
512 
513 		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
514 		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
515 		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
516 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
517 	}
518 
519 	return 0;
520 }
521 
522 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
523 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
524 {
525 	struct dwc3_request		*req;
526 
527 	if (!list_empty(&dep->req_queued)) {
528 		dwc3_stop_active_transfer(dwc, dep->number, true);
529 
530 		/* give back all requests to the gadget driver */
531 		while (!list_empty(&dep->req_queued)) {
532 			req = next_request(&dep->req_queued);
533 
534 			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
535 		}
536 	}
537 
538 	while (!list_empty(&dep->request_list)) {
539 		req = next_request(&dep->request_list);
540 
541 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
542 	}
543 }
544 
545 /**
546  * __dwc3_gadget_ep_disable - Disables a HW endpoint
547  * @dep: the endpoint to disable
548  *
549  * This function also removes requests which are currently processed by the
550  * hardware and those which are not yet scheduled.
551  * Caller should take care of locking.
552  */
553 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
554 {
555 	struct dwc3		*dwc = dep->dwc;
556 	u32			reg;
557 
558 	dwc3_remove_requests(dwc, dep);
559 
560 	/* make sure HW endpoint isn't stalled */
561 	if (dep->flags & DWC3_EP_STALL)
562 		__dwc3_gadget_ep_set_halt(dep, 0, false);
563 
564 	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
565 	reg &= ~DWC3_DALEPENA_EP(dep->number);
566 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
567 
568 	dep->stream_capable = false;
569 	dep->endpoint.desc = NULL;
570 	dep->comp_desc = NULL;
571 	dep->type = 0;
572 	dep->flags = 0;
573 
574 	return 0;
575 }
576 
577 /* -------------------------------------------------------------------------- */
578 
579 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
580 		const struct usb_endpoint_descriptor *desc)
581 {
582 	return -EINVAL;
583 }
584 
585 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
586 {
587 	return -EINVAL;
588 }
589 
590 /* -------------------------------------------------------------------------- */
591 
592 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
593 		const struct usb_endpoint_descriptor *desc)
594 {
595 	struct dwc3_ep			*dep;
596 	unsigned long			flags;
597 	int				ret;
598 
599 	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
600 		pr_debug("dwc3: invalid parameters\n");
601 		return -EINVAL;
602 	}
603 
604 	if (!desc->wMaxPacketSize) {
605 		pr_debug("dwc3: missing wMaxPacketSize\n");
606 		return -EINVAL;
607 	}
608 
609 	dep = to_dwc3_ep(ep);
610 
611 	if (dep->flags & DWC3_EP_ENABLED) {
612 		WARN(true, "%s is already enabled\n",
613 				dep->name);
614 		return 0;
615 	}
616 
617 	switch (usb_endpoint_type(desc)) {
618 	case USB_ENDPOINT_XFER_CONTROL:
619 		strlcat(dep->name, "-control", sizeof(dep->name));
620 		break;
621 	case USB_ENDPOINT_XFER_ISOC:
622 		strlcat(dep->name, "-isoc", sizeof(dep->name));
623 		break;
624 	case USB_ENDPOINT_XFER_BULK:
625 		strlcat(dep->name, "-bulk", sizeof(dep->name));
626 		break;
627 	case USB_ENDPOINT_XFER_INT:
628 		strlcat(dep->name, "-int", sizeof(dep->name));
629 		break;
630 	default:
631 		dev_err(dwc->dev, "invalid endpoint transfer type\n");
632 	}
633 
634 	spin_lock_irqsave(&dwc->lock, flags);
635 	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
636 	spin_unlock_irqrestore(&dwc->lock, flags);
637 
638 	return ret;
639 }
640 
641 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
642 {
643 	struct dwc3_ep			*dep;
644 	unsigned long			flags;
645 	int				ret;
646 
647 	if (!ep) {
648 		pr_debug("dwc3: invalid parameters\n");
649 		return -EINVAL;
650 	}
651 
652 	dep = to_dwc3_ep(ep);
653 
654 	if (!(dep->flags & DWC3_EP_ENABLED)) {
655 		WARN(true, "%s is already disabled\n",
656 				dep->name);
657 		return 0;
658 	}
659 
660 	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
661 			dep->number >> 1,
662 			(dep->number & 1) ? "in" : "out");
663 
664 	spin_lock_irqsave(&dwc->lock, flags);
665 	ret = __dwc3_gadget_ep_disable(dep);
666 	spin_unlock_irqrestore(&dwc->lock, flags);
667 
668 	return ret;
669 }
670 
671 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
672 	gfp_t gfp_flags)
673 {
674 	struct dwc3_request		*req;
675 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
676 
677 	req = kzalloc(sizeof(*req), gfp_flags);
678 	if (!req)
679 		return NULL;
680 
681 	req->epnum	= dep->number;
682 	req->dep	= dep;
683 
684 	return &req->request;
685 }
686 
687 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
688 		struct usb_request *request)
689 {
690 	struct dwc3_request		*req = to_dwc3_request(request);
691 
692 	kfree(req);
693 }
694 
695 /**
696  * dwc3_prepare_one_trb - setup one TRB from one request
697  * @dep: endpoint for which this request is prepared
698  * @req: dwc3_request pointer
699  */
700 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
701 		struct dwc3_request *req, dma_addr_t dma,
702 		unsigned length, unsigned last, unsigned chain, unsigned node)
703 {
704 	struct dwc3_trb		*trb;
705 
706 	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
707 			dep->name, req, (unsigned long long) dma,
708 			length, last ? " last" : "",
709 			chain ? " chain" : "");
710 
711 
712 	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
713 
714 	if (!req->trb) {
715 		dwc3_gadget_move_request_queued(req);
716 		req->trb = trb;
717 		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
718 		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
719 	}
720 
721 	dep->free_slot++;
722 	/* Skip the LINK-TRB on ISOC */
723 	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
724 			usb_endpoint_xfer_isoc(dep->endpoint.desc))
725 		dep->free_slot++;
726 
727 	trb->size = DWC3_TRB_SIZE_LENGTH(length);
728 	trb->bpl = lower_32_bits(dma);
729 	trb->bph = upper_32_bits(dma);
730 
731 	switch (usb_endpoint_type(dep->endpoint.desc)) {
732 	case USB_ENDPOINT_XFER_CONTROL:
733 		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
734 		break;
735 
736 	case USB_ENDPOINT_XFER_ISOC:
737 		if (!node)
738 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
739 		else
740 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
741 		break;
742 
743 	case USB_ENDPOINT_XFER_BULK:
744 	case USB_ENDPOINT_XFER_INT:
745 		trb->ctrl = DWC3_TRBCTL_NORMAL;
746 		break;
747 	default:
748 		/*
749 		 * This is only possible with faulty memory because we
750 		 * checked it already :)
751 		 */
752 		BUG();
753 	}
754 
755 	if (!req->request.no_interrupt && !chain)
756 		trb->ctrl |= DWC3_TRB_CTRL_IOC;
757 
758 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
759 		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
760 		trb->ctrl |= DWC3_TRB_CTRL_CSP;
761 	} else if (last) {
762 		trb->ctrl |= DWC3_TRB_CTRL_LST;
763 	}
764 
765 	if (chain)
766 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
767 
768 	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
769 		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
770 
771 	trb->ctrl |= DWC3_TRB_CTRL_HWO;
772 
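	/*
	 * The controller fetches the TRB and accesses the data buffer by
	 * DMA, so flush both out of the CPU data cache now that the HWO
	 * bit has been set.
	 */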
773 	dwc3_flush_cache((uintptr_t)dma, length);
774 	dwc3_flush_cache((uintptr_t)trb, sizeof(*trb));
775 }
776 
777 /*
778  * dwc3_prepare_trbs - setup TRBs from requests
779  * @dep: endpoint for which requests are being prepared
780  * @starting: true if the endpoint is idle and no requests are queued.
781  *
782  * The function goes through the requests list and sets up TRBs for the
783  * transfers. The function returns once there are no more TRBs available or
784  * it runs out of requests.
785  */
786 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
787 {
788 	struct dwc3_request	*req, *n;
789 	u32			trbs_left;
790 	u32			max;
791 
792 	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
793 
794 	/* the first request must not be queued */
795 	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
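	/*
	 * busy_slot and free_slot are free-running indices; since the pool
	 * size is a power of two, the masked difference above is the number
	 * of TRB slots still available in the ring.
	 */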
796 
797 	/* Can't wrap around on a non-isoc EP since there's no link TRB */
798 	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
799 		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
800 		if (trbs_left > max)
801 			trbs_left = max;
802 	}
803 
804 	/*
805 	 * If the busy and free slots are equal, it is either full or empty.
806 	 * If we are starting to process requests then we are empty;
807 	 * otherwise we are full and don't do anything.
808 	 */
809 	if (!trbs_left) {
810 		if (!starting)
811 			return;
812 		trbs_left = DWC3_TRB_NUM;
813 		/*
814 		 * In case we start from scratch, we queue the ISOC requests
815 		 * starting from slot 1. This is done because we use ring
816 		 * buffer and have no LST bit to stop us. Instead, we place
817 		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
818 		 * after the first request so we start at slot 1 and have
819 		 * 7 requests proceed before we hit the first IOC.
820 		 * Other transfer types don't use the ring buffer and are
821 		 * processed from the first TRB until the last one. Since we
822 		 * don't wrap around we have to start at the beginning.
823 		 */
824 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
825 			dep->busy_slot = 1;
826 			dep->free_slot = 1;
827 		} else {
828 			dep->busy_slot = 0;
829 			dep->free_slot = 0;
830 		}
831 	}
832 
833 	/* The last TRB is a link TRB, not used for xfer */
834 	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
835 		return;
836 
837 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
838 		unsigned	length;
839 		dma_addr_t	dma;
840 
841 		dma = req->request.dma;
842 		length = req->request.length;
843 
844 		dwc3_prepare_one_trb(dep, req, dma, length,
845 				     true, false, 0);
846 
847 		break;
848 	}
849 }
850 
851 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
852 		int start_new)
853 {
854 	struct dwc3_gadget_ep_cmd_params params;
855 	struct dwc3_request		*req;
856 	struct dwc3			*dwc = dep->dwc;
857 	int				ret;
858 	u32				cmd;
859 
860 	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
861 		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
862 		return -EBUSY;
863 	}
864 	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
865 
866 	/*
867 	 * If we are getting here after a short-out-packet we don't enqueue any
868 	 * new requests as we try to set the IOC bit only on the last request.
869 	 */
870 	if (start_new) {
871 		if (list_empty(&dep->req_queued))
872 			dwc3_prepare_trbs(dep, start_new);
873 
874 		/* req points to the first request which will be sent */
875 		req = next_request(&dep->req_queued);
876 	} else {
877 		dwc3_prepare_trbs(dep, start_new);
878 
879 		/*
880 		 * req points to the first request where HWO changed from 0 to 1
881 		 */
882 		req = next_request(&dep->req_queued);
883 	}
884 	if (!req) {
885 		dep->flags |= DWC3_EP_PENDING_REQUEST;
886 		return 0;
887 	}
888 
889 	memset(&params, 0, sizeof(params));
890 
891 	if (start_new) {
892 		params.param0 = upper_32_bits(req->trb_dma);
893 		params.param1 = lower_32_bits(req->trb_dma);
894 		cmd = DWC3_DEPCMD_STARTTRANSFER;
895 	} else {
896 		cmd = DWC3_DEPCMD_UPDATETRANSFER;
897 	}
898 
899 	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
900 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
901 	if (ret < 0) {
902 		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
903 
904 		/*
905 		 * FIXME we need to iterate over the list of requests
906 		 * here and stop, unmap, free and del each of the linked
907 		 * requests instead of what we do now.
908 		 */
909 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
910 				req->direction);
911 		list_del(&req->list);
912 		return ret;
913 	}
914 
915 	dep->flags |= DWC3_EP_BUSY;
916 
917 	if (start_new) {
918 		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
919 				dep->number);
920 		WARN_ON_ONCE(!dep->resource_index);
921 	}
922 
923 	return 0;
924 }
925 
926 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
927 		struct dwc3_ep *dep, u32 cur_uf)
928 {
929 	u32 uf;
930 
931 	if (list_empty(&dep->request_list)) {
932 		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
933 			dep->name);
934 		dep->flags |= DWC3_EP_PENDING_REQUEST;
935 		return;
936 	}
937 
938 	/* 4 micro frames in the future */
939 	uf = cur_uf + dep->interval * 4;
940 
941 	__dwc3_gadget_kick_transfer(dep, uf, 1);
942 }
943 
944 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
945 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
946 {
947 	u32 cur_uf, mask;
948 
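	/*
	 * dep->interval is a power of two (derived from bInterval), so this
	 * rounds the microframe number reported in the event down to the
	 * endpoint's service interval boundary.
	 */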
949 	mask = ~(dep->interval - 1);
950 	cur_uf = event->parameters & mask;
951 
952 	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
953 }
954 
955 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
956 {
957 	struct dwc3		*dwc = dep->dwc;
958 	int			ret;
959 
960 	req->request.actual	= 0;
961 	req->request.status	= -EINPROGRESS;
962 	req->direction		= dep->direction;
963 	req->epnum		= dep->number;
964 
965 	/*
966 	 * DWC3 hangs on OUT requests smaller than maxpacket size,
967 	 * so HACK the request length
968 	 */
969 	if (dep->direction == 0 &&
970 	    req->request.length < dep->endpoint.maxpacket)
971 		req->request.length = dep->endpoint.maxpacket;
972 
973 	/*
974 	 * We only add to our list of requests now and
975 	 * start consuming the list once we get XferNotReady
976 	 * IRQ.
977 	 *
978 	 * That way, we avoid doing anything that we don't need
979 	 * to do now and defer it until the point we receive a
980 	 * particular token from the Host side.
981 	 *
982 	 * This will also avoid Host cancelling URBs due to too
983 	 * many NAKs.
984 	 */
985 	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
986 			dep->direction);
987 	if (ret)
988 		return ret;
989 
990 	list_add_tail(&req->list, &dep->request_list);
991 
992 	/*
993 	 * There are a few special cases:
994 	 *
995 	 * 1. XferNotReady with empty list of requests. We need to kick the
996 	 *    transfer here in that situation, otherwise we will be NAKing
997 	 *    forever. If we get XferNotReady before gadget driver has a
998 	 *    chance to queue a request, we will ACK the IRQ but won't be
999 	 *    able to receive the data until the next request is queued.
1000 	 *    The following code is handling exactly that.
1001 	 *
1002 	 */
1003 	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1004 		/*
1005 		 * If xfernotready is already elapsed and it is a case
1006 		 * of isoc transfer, then issue END TRANSFER, so that
1007 		 * you can receive xfernotready again and can have
1008 		 * notion of current microframe.
1009 		 */
1010 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1011 			if (list_empty(&dep->req_queued)) {
1012 				dwc3_stop_active_transfer(dwc, dep->number, true);
1013 				dep->flags = DWC3_EP_ENABLED;
1014 			}
1015 			return 0;
1016 		}
1017 
1018 		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1019 		if (ret && ret != -EBUSY)
1020 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1021 					dep->name);
1022 		return ret;
1023 	}
1024 
1025 	/*
1026 	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1027 	 *    kick the transfer here after queuing a request, otherwise the
1028 	 *    core may not see the modified TRB(s).
1029 	 */
1030 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1031 			(dep->flags & DWC3_EP_BUSY) &&
1032 			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
1033 		WARN_ON_ONCE(!dep->resource_index);
1034 		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1035 				false);
1036 		if (ret && ret != -EBUSY)
1037 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1038 					dep->name);
1039 		return ret;
1040 	}
1041 
1042 	/*
1043 	 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
1044 	 * right away, otherwise host will not know we have streams to be
1045 	 * handled.
1046 	 */
1047 	if (dep->stream_capable) {
1048 		int	ret;
1049 
1050 		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1051 		if (ret && ret != -EBUSY) {
1052 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1053 					dep->name);
1054 		}
1055 	}
1056 
1057 	return 0;
1058 }
1059 
1060 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1061 	gfp_t gfp_flags)
1062 {
1063 	struct dwc3_request		*req = to_dwc3_request(request);
1064 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1065 
1066 	unsigned long			flags;
1067 
1068 	int				ret;
1069 
1070 	spin_lock_irqsave(&dwc->lock, flags);
1071 	if (!dep->endpoint.desc) {
1072 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1073 				request, ep->name);
1074 		ret = -ESHUTDOWN;
1075 		goto out;
1076 	}
1077 
1078 	if (req->dep != dep) {
1079 		WARN(true, "request %p belongs to '%s'\n",
1080 				request, req->dep->name);
1081 		ret = -EINVAL;
1082 		goto out;
1083 	}
1084 
1085 	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1086 			request, ep->name, request->length);
1087 
1088 	ret = __dwc3_gadget_ep_queue(dep, req);
1089 
1090 out:
1091 	spin_unlock_irqrestore(&dwc->lock, flags);
1092 
1093 	return ret;
1094 }
1095 
1096 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1097 		struct usb_request *request)
1098 {
1099 	struct dwc3_request		*req = to_dwc3_request(request);
1100 	struct dwc3_request		*r = NULL;
1101 
1102 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1103 	struct dwc3			*dwc = dep->dwc;
1104 
1105 	unsigned long			flags;
1106 	int				ret = 0;
1107 
1108 	spin_lock_irqsave(&dwc->lock, flags);
1109 
1110 	list_for_each_entry(r, &dep->request_list, list) {
1111 		if (r == req)
1112 			break;
1113 	}
1114 
1115 	if (r != req) {
1116 		list_for_each_entry(r, &dep->req_queued, list) {
1117 			if (r == req)
1118 				break;
1119 		}
1120 		if (r == req) {
1121 			/* wait until it is processed */
1122 			dwc3_stop_active_transfer(dwc, dep->number, true);
1123 			goto out1;
1124 		}
1125 		dev_err(dwc->dev, "request %p was not queued to %s\n",
1126 				request, ep->name);
1127 		ret = -EINVAL;
1128 		goto out0;
1129 	}
1130 
1131 out1:
1132 	/* giveback the request */
1133 	dwc3_gadget_giveback(dep, req, -ECONNRESET);
1134 
1135 out0:
1136 	spin_unlock_irqrestore(&dwc->lock, flags);
1137 
1138 	return ret;
1139 }
1140 
1141 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1142 {
1143 	struct dwc3_gadget_ep_cmd_params	params;
1144 	struct dwc3				*dwc = dep->dwc;
1145 	int					ret;
1146 
1147 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1148 		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1149 		return -EINVAL;
1150 	}
1151 
1152 	memset(&params, 0x00, sizeof(params));
1153 
1154 	if (value) {
1155 		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1156 				(!list_empty(&dep->req_queued) ||
1157 				 !list_empty(&dep->request_list)))) {
1158 			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1159 					dep->name);
1160 			return -EAGAIN;
1161 		}
1162 
1163 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1164 			DWC3_DEPCMD_SETSTALL, &params);
1165 		if (ret)
1166 			dev_err(dwc->dev, "failed to set STALL on %s\n",
1167 					dep->name);
1168 		else
1169 			dep->flags |= DWC3_EP_STALL;
1170 	} else {
1171 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1172 			DWC3_DEPCMD_CLEARSTALL, &params);
1173 		if (ret)
1174 			dev_err(dwc->dev, "failed to clear STALL on %s\n",
1175 					dep->name);
1176 		else
1177 			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1178 	}
1179 
1180 	return ret;
1181 }
1182 
1183 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1184 {
1185 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1186 
1187 	unsigned long			flags;
1188 
1189 	int				ret;
1190 
1191 	spin_lock_irqsave(&dwc->lock, flags);
1192 	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1193 	spin_unlock_irqrestore(&dwc->lock, flags);
1194 
1195 	return ret;
1196 }
1197 
1198 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1199 {
1200 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1201 	unsigned long			flags;
1202 	int				ret;
1203 
1204 	spin_lock_irqsave(&dwc->lock, flags);
1205 	dep->flags |= DWC3_EP_WEDGE;
1206 
1207 	if (dep->number == 0 || dep->number == 1)
1208 		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1209 	else
1210 		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1211 	spin_unlock_irqrestore(&dwc->lock, flags);
1212 
1213 	return ret;
1214 }
1215 
1216 /* -------------------------------------------------------------------------- */
1217 
1218 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1219 	.bLength	= USB_DT_ENDPOINT_SIZE,
1220 	.bDescriptorType = USB_DT_ENDPOINT,
1221 	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
1222 };
1223 
1224 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1225 	.enable		= dwc3_gadget_ep0_enable,
1226 	.disable	= dwc3_gadget_ep0_disable,
1227 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1228 	.free_request	= dwc3_gadget_ep_free_request,
1229 	.queue		= dwc3_gadget_ep0_queue,
1230 	.dequeue	= dwc3_gadget_ep_dequeue,
1231 	.set_halt	= dwc3_gadget_ep0_set_halt,
1232 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1233 };
1234 
1235 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1236 	.enable		= dwc3_gadget_ep_enable,
1237 	.disable	= dwc3_gadget_ep_disable,
1238 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1239 	.free_request	= dwc3_gadget_ep_free_request,
1240 	.queue		= dwc3_gadget_ep_queue,
1241 	.dequeue	= dwc3_gadget_ep_dequeue,
1242 	.set_halt	= dwc3_gadget_ep_set_halt,
1243 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1244 };
1245 
1246 /* -------------------------------------------------------------------------- */
1247 
1248 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1249 {
1250 	struct dwc3		*dwc = gadget_to_dwc(g);
1251 	u32			reg;
1252 
1253 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1254 	return DWC3_DSTS_SOFFN(reg);
1255 }
1256 
1257 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1258 {
1259 	struct dwc3		*dwc = gadget_to_dwc(g);
1260 
1261 	unsigned long		timeout;
1262 	unsigned long		flags;
1263 
1264 	u32			reg;
1265 
1266 	int			ret = 0;
1267 
1268 	u8			link_state;
1269 	u8			speed;
1270 
1271 	spin_lock_irqsave(&dwc->lock, flags);
1272 
1273 	/*
1274 	 * According to the Databook, a Remote Wakeup request should
1275 	 * be issued only when the device is in the early suspend state.
1276 	 *
1277 	 * We can check that via USB Link State bits in DSTS register.
1278 	 */
1279 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1280 
1281 	speed = reg & DWC3_DSTS_CONNECTSPD;
1282 	if (speed == DWC3_DSTS_SUPERSPEED) {
1283 		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1284 		ret = -EINVAL;
1285 		goto out;
1286 	}
1287 
1288 	link_state = DWC3_DSTS_USBLNKST(reg);
1289 
1290 	switch (link_state) {
1291 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1292 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1293 		break;
1294 	default:
1295 		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1296 				link_state);
1297 		ret = -EINVAL;
1298 		goto out;
1299 	}
1300 
1301 	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1302 	if (ret < 0) {
1303 		dev_err(dwc->dev, "failed to put link in Recovery\n");
1304 		goto out;
1305 	}
1306 
1307 	/* Recent versions do this automatically */
1308 	if (dwc->revision < DWC3_REVISION_194A) {
1309 		/* write zeroes to Link Change Request */
1310 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1311 		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1312 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1313 	}
1314 
1315 	/* poll until Link State changes to ON */
1316 	timeout = 1000;
1317 
1318 	while (timeout--) {
1319 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1320 
1321 		/* in HS, means ON */
1322 		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1323 			break;
1324 	}
1325 
1326 	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1327 		dev_err(dwc->dev, "failed to send remote wakeup\n");
1328 		ret = -EINVAL;
1329 	}
1330 
1331 out:
1332 	spin_unlock_irqrestore(&dwc->lock, flags);
1333 
1334 	return ret;
1335 }
1336 
1337 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1338 		int is_selfpowered)
1339 {
1340 	struct dwc3		*dwc = gadget_to_dwc(g);
1341 	unsigned long		flags;
1342 
1343 	spin_lock_irqsave(&dwc->lock, flags);
1344 	dwc->is_selfpowered = !!is_selfpowered;
1345 	spin_unlock_irqrestore(&dwc->lock, flags);
1346 
1347 	return 0;
1348 }
1349 
1350 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1351 {
1352 	u32			reg;
1353 	u32			timeout = 500;
1354 
1355 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1356 	if (is_on) {
1357 		if (dwc->revision <= DWC3_REVISION_187A) {
1358 			reg &= ~DWC3_DCTL_TRGTULST_MASK;
1359 			reg |= DWC3_DCTL_TRGTULST_RX_DET;
1360 		}
1361 
1362 		if (dwc->revision >= DWC3_REVISION_194A)
1363 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1364 		reg |= DWC3_DCTL_RUN_STOP;
1365 
1366 		if (dwc->has_hibernation)
1367 			reg |= DWC3_DCTL_KEEP_CONNECT;
1368 
1369 		dwc->pullups_connected = true;
1370 	} else {
1371 		reg &= ~DWC3_DCTL_RUN_STOP;
1372 
1373 		if (dwc->has_hibernation && !suspend)
1374 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1375 
1376 		dwc->pullups_connected = false;
1377 	}
1378 
1379 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1380 
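	/*
	 * DSTS.DEVCTRLHLT clears once the controller is running and sets
	 * once it has actually halted, so poll until the bit matches the
	 * requested Run/Stop state.
	 */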
1381 	do {
1382 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1383 		if (is_on) {
1384 			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1385 				break;
1386 		} else {
1387 			if (reg & DWC3_DSTS_DEVCTRLHLT)
1388 				break;
1389 		}
1390 		timeout--;
1391 		if (!timeout)
1392 			return -ETIMEDOUT;
1393 		udelay(1);
1394 	} while (1);
1395 
1396 	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1397 			dwc->gadget_driver
1398 			? dwc->gadget_driver->function : "no-function",
1399 			is_on ? "connect" : "disconnect");
1400 
1401 	return 0;
1402 }
1403 
1404 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1405 {
1406 	struct dwc3		*dwc = gadget_to_dwc(g);
1407 	unsigned long		flags;
1408 	int			ret;
1409 
1410 	is_on = !!is_on;
1411 
1412 	spin_lock_irqsave(&dwc->lock, flags);
1413 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
1414 	spin_unlock_irqrestore(&dwc->lock, flags);
1415 
1416 	return ret;
1417 }
1418 
1419 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1420 {
1421 	u32			reg;
1422 
1423 	/* Enable all but Start and End of Frame IRQs */
1424 	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1425 			DWC3_DEVTEN_EVNTOVERFLOWEN |
1426 			DWC3_DEVTEN_CMDCMPLTEN |
1427 			DWC3_DEVTEN_ERRTICERREN |
1428 			DWC3_DEVTEN_WKUPEVTEN |
1429 			DWC3_DEVTEN_ULSTCNGEN |
1430 			DWC3_DEVTEN_CONNECTDONEEN |
1431 			DWC3_DEVTEN_USBRSTEN |
1432 			DWC3_DEVTEN_DISCONNEVTEN);
1433 
1434 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1435 }
1436 
1437 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1438 {
1439 	/* mask all interrupts */
1440 	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1441 }
1442 
1443 static int dwc3_gadget_start(struct usb_gadget *g,
1444 		struct usb_gadget_driver *driver)
1445 {
1446 	struct dwc3		*dwc = gadget_to_dwc(g);
1447 	struct dwc3_ep		*dep;
1448 	unsigned long		flags;
1449 	int			ret = 0;
1450 	u32			reg;
1451 
1452 	spin_lock_irqsave(&dwc->lock, flags);
1453 
1454 	if (dwc->gadget_driver) {
1455 		dev_err(dwc->dev, "%s is already bound to %s\n",
1456 				dwc->gadget.name,
1457 				dwc->gadget_driver->function);
1458 		ret = -EBUSY;
1459 		goto err1;
1460 	}
1461 
1462 	dwc->gadget_driver	= driver;
1463 
1464 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1465 	reg &= ~(DWC3_DCFG_SPEED_MASK);
1466 
1467 	/**
1468 	 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1469 	 * which would cause metastability state on Run/Stop
1470 	 * bit if we try to force the IP to USB2-only mode.
1471 	 *
1472 	 * Because of that, we cannot configure the IP to any
1473 	 * speed other than SuperSpeed.
1474 	 *
1475 	 * Refers to:
1476 	 *
1477 	 * STAR#9000525659: Clock Domain Crossing on DCTL in
1478 	 * USB 2.0 Mode
1479 	 */
1480 	if (dwc->revision < DWC3_REVISION_220A) {
1481 		reg |= DWC3_DCFG_SUPERSPEED;
1482 	} else {
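		/*
		 * Note: the speed field of DCFG uses the same encoding as
		 * DSTS.CONNECTSPD, which is why the DWC3_DSTS_* speed macros
		 * are reused when programming DCFG here.
		 */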
1483 		switch (dwc->maximum_speed) {
1484 		case USB_SPEED_LOW:
1485 			reg |= DWC3_DSTS_LOWSPEED;
1486 			break;
1487 		case USB_SPEED_FULL:
1488 			reg |= DWC3_DSTS_FULLSPEED1;
1489 			break;
1490 		case USB_SPEED_HIGH:
1491 			reg |= DWC3_DSTS_HIGHSPEED;
1492 			break;
1493 		case USB_SPEED_SUPER:	/* FALLTHROUGH */
1494 		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
1495 		default:
1496 			reg |= DWC3_DSTS_SUPERSPEED;
1497 		}
1498 	}
1499 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1500 
1501 	dwc->start_config_issued = false;
1502 
1503 	/* Start with SuperSpeed Default */
1504 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1505 
1506 	dep = dwc->eps[0];
1507 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1508 			false);
1509 	if (ret) {
1510 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1511 		goto err2;
1512 	}
1513 
1514 	dep = dwc->eps[1];
1515 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1516 			false);
1517 	if (ret) {
1518 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1519 		goto err3;
1520 	}
1521 
1522 	/* begin to receive SETUP packets */
1523 	dwc->ep0state = EP0_SETUP_PHASE;
1524 	dwc3_ep0_out_start(dwc);
1525 
1526 	dwc3_gadget_enable_irq(dwc);
1527 
1528 	spin_unlock_irqrestore(&dwc->lock, flags);
1529 
1530 	return 0;
1531 
1532 err3:
1533 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1534 
1535 err2:
1536 	dwc->gadget_driver = NULL;
1537 
1538 err1:
1539 	spin_unlock_irqrestore(&dwc->lock, flags);
1540 
1541 	return ret;
1542 }
1543 
1544 static int dwc3_gadget_stop(struct usb_gadget *g)
1545 {
1546 	struct dwc3		*dwc = gadget_to_dwc(g);
1547 	unsigned long		flags;
1548 
1549 	spin_lock_irqsave(&dwc->lock, flags);
1550 
1551 	dwc3_gadget_disable_irq(dwc);
1552 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1553 	__dwc3_gadget_ep_disable(dwc->eps[1]);
1554 
1555 	dwc->gadget_driver	= NULL;
1556 
1557 	spin_unlock_irqrestore(&dwc->lock, flags);
1558 
1559 	return 0;
1560 }
1561 
1562 static const struct usb_gadget_ops dwc3_gadget_ops = {
1563 	.get_frame		= dwc3_gadget_get_frame,
1564 	.wakeup			= dwc3_gadget_wakeup,
1565 	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1566 	.pullup			= dwc3_gadget_pullup,
1567 	.udc_start		= dwc3_gadget_start,
1568 	.udc_stop		= dwc3_gadget_stop,
1569 };
1570 
1571 /* -------------------------------------------------------------------------- */
1572 
1573 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1574 		u8 num, u32 direction)
1575 {
1576 	struct dwc3_ep			*dep;
1577 	u8				i;
1578 
1579 	for (i = 0; i < num; i++) {
1580 		u8 epnum = (i << 1) | (!!direction);
1581 
1582 		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1583 		if (!dep)
1584 			return -ENOMEM;
1585 
1586 		dep->dwc = dwc;
1587 		dep->number = epnum;
1588 		dep->direction = !!direction;
1589 		dwc->eps[epnum] = dep;
1590 
1591 		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1592 				(epnum & 1) ? "in" : "out");
1593 
1594 		dep->endpoint.name = dep->name;
1595 
1596 		dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1597 
1598 		if (epnum == 0 || epnum == 1) {
1599 			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1600 			dep->endpoint.maxburst = 1;
1601 			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1602 			if (!epnum)
1603 				dwc->gadget.ep0 = &dep->endpoint;
1604 		} else {
1605 			int		ret;
1606 
1607 			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1608 			dep->endpoint.max_streams = 15;
1609 			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1610 			list_add_tail(&dep->endpoint.ep_list,
1611 					&dwc->gadget.ep_list);
1612 
1613 			ret = dwc3_alloc_trb_pool(dep);
1614 			if (ret)
1615 				return ret;
1616 		}
1617 
1618 		INIT_LIST_HEAD(&dep->request_list);
1619 		INIT_LIST_HEAD(&dep->req_queued);
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1626 {
1627 	int				ret;
1628 
1629 	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1630 
1631 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1632 	if (ret < 0) {
1633 		dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1634 		return ret;
1635 	}
1636 
1637 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1638 	if (ret < 0) {
1639 		dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1640 		return ret;
1641 	}
1642 
1643 	return 0;
1644 }
1645 
1646 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1647 {
1648 	struct dwc3_ep			*dep;
1649 	u8				epnum;
1650 
1651 	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1652 		dep = dwc->eps[epnum];
1653 		if (!dep)
1654 			continue;
1655 		/*
1656 		 * Physical endpoints 0 and 1 are special; they form the
1657 		 * bi-directional USB endpoint 0.
1658 		 *
1659 		 * For those two physical endpoints, we don't allocate a TRB
1660 		 * pool nor do we add them to the endpoints list. Due to that, we
1661 		 * shouldn't do these two operations otherwise we would end up
1662 		 * with all sorts of bugs when removing dwc3.ko.
1663 		 */
1664 		if (epnum != 0 && epnum != 1) {
1665 			dwc3_free_trb_pool(dep);
1666 			list_del(&dep->endpoint.ep_list);
1667 		}
1668 
1669 		kfree(dep);
1670 	}
1671 }
1672 
1673 /* -------------------------------------------------------------------------- */
1674 
1675 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1676 		struct dwc3_request *req, struct dwc3_trb *trb,
1677 		const struct dwc3_event_depevt *event, int status)
1678 {
1679 	unsigned int		count;
1680 	unsigned int		s_pkt = 0;
1681 	unsigned int		trb_status;
1682 
1683 	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1684 		/*
1685 		 * We continue despite the error. There is not much we
1686 		 * can do. If we don't clean it up we loop forever. If
1687 		 * we skip the TRB then it gets overwritten after a
1688 		 * while since we use them in a ring buffer. A BUG()
1689 		 * would help. Let's hope that if this occurs, someone
1690 		 * fixes the root cause instead of looking away :)
1691 		 */
1692 		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1693 				dep->name, trb);
1694 	count = trb->size & DWC3_TRB_SIZE_MASK;
1695 
1696 	if (dep->direction) {
1697 		if (count) {
1698 			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1699 			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1700 				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1701 						dep->name);
1702 				/*
1703 				 * If a missed isoc occurred and there is
1704 				 * no request queued, then issue END
1705 				 * TRANSFER, so that the core generates
1706 				 * the next xfernotready and we will issue
1707 				 * a fresh START TRANSFER.
1708 				 * If there are still queued requests,
1709 				 * then wait, do not issue either END
1710 				 * or UPDATE TRANSFER, just attach the next
1711 				 * request in request_list during
1712 				 * giveback. If any future queued request
1713 				 * is successfully transferred, then we
1714 				 * will issue UPDATE TRANSFER for all
1715 				 * requests in the request_list.
1716 				 */
1717 				dep->flags |= DWC3_EP_MISSED_ISOC;
1718 			} else {
1719 				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1720 						dep->name);
1721 				status = -ECONNRESET;
1722 			}
1723 		} else {
1724 			dep->flags &= ~DWC3_EP_MISSED_ISOC;
1725 		}
1726 	} else {
1727 		if (count && (event->status & DEPEVT_STATUS_SHORT))
1728 			s_pkt = 1;
1729 	}
1730 
1731 	/*
1732 	 * We assume here we will always receive the entire data block
1733 	 * which we should receive. Meaning, if we program RX to
1734 	 * receive 4K but we receive only 2K, we assume that's all we
1735 	 * should receive and we simply bounce the request back to the
1736 	 * gadget driver for further processing.
1737 	 */
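	/*
	 * After completion the TRB size field holds the number of bytes
	 * that were NOT transferred, so the difference below is the amount
	 * of data that actually moved.
	 */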
1738 	req->request.actual += req->request.length - count;
1739 	if (s_pkt)
1740 		return 1;
1741 	if ((event->status & DEPEVT_STATUS_LST) &&
1742 			(trb->ctrl & (DWC3_TRB_CTRL_LST |
1743 				DWC3_TRB_CTRL_HWO)))
1744 		return 1;
1745 	if ((event->status & DEPEVT_STATUS_IOC) &&
1746 			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1747 		return 1;
1748 	return 0;
1749 }
1750 
1751 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1752 		const struct dwc3_event_depevt *event, int status)
1753 {
1754 	struct dwc3_request	*req;
1755 	struct dwc3_trb		*trb;
1756 	unsigned int		slot;
1757 
1758 	req = next_request(&dep->req_queued);
1759 	if (!req) {
1760 		WARN_ON_ONCE(1);
1761 		return 1;
1762 	}
1763 
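	/*
	 * start_slot is a free-running index into the TRB ring; skip the
	 * link TRB slot on isochronous endpoints and wrap it to locate the
	 * TRB that just completed.
	 */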
1764 	slot = req->start_slot;
1765 	if ((slot == DWC3_TRB_NUM - 1) &&
1766 	    usb_endpoint_xfer_isoc(dep->endpoint.desc))
1767 		slot++;
1768 	slot %= DWC3_TRB_NUM;
1769 	trb = &dep->trb_pool[slot];
1770 
1771 	dwc3_flush_cache((uintptr_t)trb, sizeof(*trb));
1772 	__dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status);
1773 	dwc3_gadget_giveback(dep, req, status);
1774 
1775 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1776 			list_empty(&dep->req_queued)) {
1777 		if (list_empty(&dep->request_list)) {
1778 			/*
1779 			 * If there is no entry in request list then do
1780 			 * not issue END TRANSFER now. Just set PENDING
1781 			 * flag, so that END TRANSFER is issued when an
1782 			 * entry is added into request list.
1783 			 */
1784 			dep->flags = DWC3_EP_PENDING_REQUEST;
1785 		} else {
1786 			dwc3_stop_active_transfer(dwc, dep->number, true);
1787 			dep->flags = DWC3_EP_ENABLED;
1788 		}
1789 		return 1;
1790 	}
1791 
1792 	return 1;
1793 }
1794 
1795 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1796 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1797 {
1798 	unsigned		status = 0;
1799 	int			clean_busy;
1800 
1801 	if (event->status & DEPEVT_STATUS_BUSERR)
1802 		status = -ECONNRESET;
1803 
1804 	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1805 	if (clean_busy)
1806 		dep->flags &= ~DWC3_EP_BUSY;
1807 
1808 	/*
1809 	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1810 	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1811 	 */
1812 	if (dwc->revision < DWC3_REVISION_183A) {
1813 		u32		reg;
1814 		int		i;
1815 
1816 		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1817 			dep = dwc->eps[i];
1818 
1819 			if (!(dep->flags & DWC3_EP_ENABLED))
1820 				continue;
1821 
1822 			if (!list_empty(&dep->req_queued))
1823 				return;
1824 		}
1825 
1826 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1827 		reg |= dwc->u1u2;
1828 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1829 
1830 		dwc->u1u2 = 0;
1831 	}
1832 }
1833 
1834 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1835 		const struct dwc3_event_depevt *event)
1836 {
1837 	struct dwc3_ep		*dep;
1838 	u8			epnum = event->endpoint_number;
1839 
1840 	dep = dwc->eps[epnum];
1841 
1842 	if (!(dep->flags & DWC3_EP_ENABLED))
1843 		return;
1844 
1845 	if (epnum == 0 || epnum == 1) {
1846 		dwc3_ep0_interrupt(dwc, event);
1847 		return;
1848 	}
1849 
1850 	switch (event->endpoint_event) {
1851 	case DWC3_DEPEVT_XFERCOMPLETE:
1852 		dep->resource_index = 0;
1853 
1854 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1855 			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1856 					dep->name);
1857 			return;
1858 		}
1859 
1860 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1861 		break;
1862 	case DWC3_DEPEVT_XFERINPROGRESS:
1863 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1864 		break;
1865 	case DWC3_DEPEVT_XFERNOTREADY:
1866 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1867 			dwc3_gadget_start_isoc(dwc, dep, event);
1868 		} else {
1869 			int ret;
1870 
1871 			dev_vdbg(dwc->dev, "%s: reason %s\n",
1872 					dep->name, event->status &
1873 					DEPEVT_STATUS_TRANSFER_ACTIVE
1874 					? "Transfer Active"
1875 					: "Transfer Not Active");
1876 
1877 			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1878 			if (!ret || ret == -EBUSY)
1879 				return;
1880 
1881 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1882 					dep->name);
1883 		}
1884 
1885 		break;
1886 	case DWC3_DEPEVT_STREAMEVT:
1887 		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1888 			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1889 					dep->name);
1890 			return;
1891 		}
1892 
1893 		switch (event->status) {
1894 		case DEPEVT_STREAMEVT_FOUND:
1895 			dev_vdbg(dwc->dev, "Stream %d found and started\n",
1896 					event->parameters);
1897 
1898 			break;
1899 		case DEPEVT_STREAMEVT_NOTFOUND:
1900 			/* FALLTHROUGH */
1901 		default:
1902 			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1903 		}
1904 		break;
1905 	case DWC3_DEPEVT_RXTXFIFOEVT:
1906 		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1907 		break;
1908 	case DWC3_DEPEVT_EPCMDCMPLT:
1909 		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
1910 		break;
1911 	}
1912 }
1913 
1914 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1915 {
1916 	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1917 		spin_unlock(&dwc->lock);
1918 		dwc->gadget_driver->disconnect(&dwc->gadget);
1919 		spin_lock(&dwc->lock);
1920 	}
1921 }
1922 
1923 static void dwc3_suspend_gadget(struct dwc3 *dwc)
1924 {
1925 	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
1926 		spin_unlock(&dwc->lock);
1927 		dwc->gadget_driver->suspend(&dwc->gadget);
1928 		spin_lock(&dwc->lock);
1929 	}
1930 }
1931 
1932 static void dwc3_resume_gadget(struct dwc3 *dwc)
1933 {
1934 	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
1935 		spin_unlock(&dwc->lock);
1936 		dwc->gadget_driver->resume(&dwc->gadget);
1937 	}
1938 }
1939 
1940 static void dwc3_reset_gadget(struct dwc3 *dwc)
1941 {
1942 	if (!dwc->gadget_driver)
1943 		return;
1944 
1945 	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
1946 		spin_unlock(&dwc->lock);
1947 		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
1948 		spin_lock(&dwc->lock);
1949 	}
1950 }
1951 
1952 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
1953 {
1954 	struct dwc3_ep *dep;
1955 	struct dwc3_gadget_ep_cmd_params params;
1956 	u32 cmd;
1957 	int ret;
1958 
1959 	dep = dwc->eps[epnum];
1960 
1961 	if (!dep->resource_index)
1962 		return;
1963 
1964 	/*
1965 	 * NOTICE: We are violating what the Databook says about the
1966 	 * EndTransfer command. Ideally we would _always_ wait for the
1967 	 * EndTransfer Command Completion IRQ, but that's causing too
1968 	 * much trouble synchronizing between us and gadget driver.
1969 	 *
1970 	 * We have discussed this with the IP Provider and it was
1971 	 * suggested to giveback all requests here, but give HW some
1972 	 * extra time to synchronize with the interconnect. We're using
1973 	 * an arbitrary 100us delay for that.
1974 	 *
1975 	 * Note also that a similar handling was tested by Synopsys
1976 	 * (thanks a lot Paul) and nothing bad has come out of it.
1977 	 * In short, what we're doing is:
1978 	 *
1979 	 * - Issue EndTransfer WITH CMDIOC bit set
1980 	 * - Wait 100us
1981 	 */
1982 
1983 	cmd = DWC3_DEPCMD_ENDTRANSFER;
1984 	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
1985 	cmd |= DWC3_DEPCMD_CMDIOC;
1986 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
1987 	memset(&params, 0, sizeof(params));
1988 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1989 	WARN_ON_ONCE(ret);
1990 	dep->resource_index = 0;
1991 	dep->flags &= ~DWC3_EP_BUSY;
1992 	udelay(100);
1993 }
1994 
1995 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1996 {
1997 	u32 epnum;
1998 
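	/*
	 * Physical endpoints 0 and 1 back the control endpoint (ep0) and are
	 * handled by the ep0 code, so only the non-control endpoints are
	 * walked here.
	 */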
1999 	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2000 		struct dwc3_ep *dep;
2001 
2002 		dep = dwc->eps[epnum];
2003 		if (!dep)
2004 			continue;
2005 
2006 		if (!(dep->flags & DWC3_EP_ENABLED))
2007 			continue;
2008 
2009 		dwc3_remove_requests(dwc, dep);
2010 	}
2011 }
2012 
2013 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2014 {
2015 	u32 epnum;
2016 
2017 	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2018 		struct dwc3_ep *dep;
2019 		struct dwc3_gadget_ep_cmd_params params;
2020 		int ret;
2021 
2022 		dep = dwc->eps[epnum];
2023 		if (!dep)
2024 			continue;
2025 
2026 		if (!(dep->flags & DWC3_EP_STALL))
2027 			continue;
2028 
2029 		dep->flags &= ~DWC3_EP_STALL;
2030 
2031 		memset(&params, 0, sizeof(params));
2032 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2033 				DWC3_DEPCMD_CLEARSTALL, &params);
2034 		WARN_ON_ONCE(ret);
2035 	}
2036 }
2037 
2038 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2039 {
2040 	u32			reg;
2041 
2042 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2043 	reg &= ~DWC3_DCTL_INITU1ENA;
2044 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2045 
2046 	reg &= ~DWC3_DCTL_INITU2ENA;
2047 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2048 
2049 	dwc3_disconnect_gadget(dwc);
2050 	dwc->start_config_issued = false;
2051 
2052 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
2053 	dwc->setup_packet_pending = false;
2054 	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2055 }
2056 
2057 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2058 {
2059 	u32			reg;
2060 
2061 	/*
2062 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2063 	 * would cause a missing Disconnect Event if there's a
2064 	 * pending Setup Packet in the FIFO.
2065 	 *
2066 	 * There's no suggested workaround on the official Bug
2067 	 * report, which states that "unless the driver/application
2068 	 * is doing any special handling of a disconnect event,
2069 	 * there is no functional issue".
2070 	 *
2071 	 * Unfortunately, it turns out that we _do_ some special
2072 	 * handling of a disconnect event, namely complete all
2073 	 * pending transfers, notify gadget driver of the
2074 	 * disconnection, and so on.
2075 	 *
2076 	 * Our suggested workaround is to follow the Disconnect
2077 	 * Event steps here, instead, based on a setup_packet_pending
2078 	 * flag. The flag gets set whenever we have an XferNotReady
2079 	 * event on EP0 and gets cleared on XferComplete for the
2080 	 * same endpoint.
2081 	 *
2082 	 * Refers to:
2083 	 *
2084 	 * STAR#9000466709: RTL: Device : Disconnect event not
2085 	 * generated if setup packet pending in FIFO
2086 	 */
2087 	if (dwc->revision < DWC3_REVISION_188A) {
2088 		if (dwc->setup_packet_pending)
2089 			dwc3_gadget_disconnect_interrupt(dwc);
2090 	}
2091 
2092 	dwc3_reset_gadget(dwc);
2093 
2094 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2095 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2096 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2097 	dwc->test_mode = false;
2098 
2099 	dwc3_stop_active_transfers(dwc);
2100 	dwc3_clear_stall_all_ep(dwc);
2101 	dwc->start_config_issued = false;
2102 
2103 	/* Reset device address to zero */
2104 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2105 	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2106 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2107 }
2108 
2109 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2110 {
2111 	u32 reg;
2112 	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2113 
2114 	/*
2115 	 * The RAM clock is only switched at SuperSpeed; the reason is not
2116 	 * entirely clear, but it may become part of a power-saving scheme.
2117 	 */
2118 
2119 	if (speed != DWC3_DSTS_SUPERSPEED)
2120 		return;
2121 
2122 	/*
2123 	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2124 	 * each time on Connect Done.
2125 	 */
2126 	if (!usb30_clock)
2127 		return;
2128 
2129 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2130 	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2131 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2132 }
2133 
2134 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2135 {
2136 	struct dwc3_ep		*dep;
2137 	int			ret;
2138 	u32			reg;
2139 	u8			speed;
2140 
2141 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2142 	speed = reg & DWC3_DSTS_CONNECTSPD;
2143 	dwc->speed = speed;
2144 
2145 	dwc3_update_ram_clk_sel(dwc, speed);
2146 
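	/*
	 * Note: DSTS.CONNECTSPD uses the same 3-bit speed encoding as
	 * DCFG.DEVSPD, which is why the DWC3_DCFG_* speed values can be
	 * matched against it in the switch below.
	 */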
2147 	switch (speed) {
2148 	case DWC3_DCFG_SUPERSPEED:
2149 		/*
2150 		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2151 		 * would cause a missing USB3 Reset event.
2152 		 *
2153 		 * In such situations, we should force a USB3 Reset
2154 		 * event by calling our dwc3_gadget_reset_interrupt()
2155 		 * routine.
2156 		 *
2157 		 * Refers to:
2158 		 *
2159 		 * STAR#9000483510: RTL: SS : USB3 reset event may
2160 		 * not be generated always when the link enters poll
2161 		 */
2162 		if (dwc->revision < DWC3_REVISION_190A)
2163 			dwc3_gadget_reset_interrupt(dwc);
2164 
2165 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2166 		dwc->gadget.ep0->maxpacket = 512;
2167 		dwc->gadget.speed = USB_SPEED_SUPER;
2168 		break;
2169 	case DWC3_DCFG_HIGHSPEED:
2170 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2171 		dwc->gadget.ep0->maxpacket = 64;
2172 		dwc->gadget.speed = USB_SPEED_HIGH;
2173 		break;
2174 	case DWC3_DCFG_FULLSPEED2:
2175 	case DWC3_DCFG_FULLSPEED1:
2176 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2177 		dwc->gadget.ep0->maxpacket = 64;
2178 		dwc->gadget.speed = USB_SPEED_FULL;
2179 		break;
2180 	case DWC3_DCFG_LOWSPEED:
2181 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2182 		dwc->gadget.ep0->maxpacket = 8;
2183 		dwc->gadget.speed = USB_SPEED_LOW;
2184 		break;
2185 	}
2186 
2187 	/* Enable USB2 LPM Capability */
2188 
2189 	if ((dwc->revision > DWC3_REVISION_194A)
2190 			&& (speed != DWC3_DCFG_SUPERSPEED)) {
2191 		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2192 		reg |= DWC3_DCFG_LPM_CAP;
2193 		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2194 
2195 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2196 		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2197 
2198 		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2199 
2200 		/*
2201 		 * When the dwc3 revision is >= 2.40a, the LPM Erratum is
2202 		 * enabled, DCFG.LPMCap is set, and the BESL value in the LPM
2203 		 * token is less than or equal to the LPM NYET threshold, the
2204 		 * core responds with an ACK.
2205 		 */
2206 		if (dwc->revision < DWC3_REVISION_240A && dwc->has_lpm_erratum)
2207 			WARN(true, "LPM Erratum not available on dwc3 revisions < 2.40a\n");
2208 
2209 		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2210 			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2211 
2212 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2213 	} else {
2214 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2215 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2216 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2217 	}
2218 
2219 	dep = dwc->eps[0];
2220 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2221 			false);
2222 	if (ret) {
2223 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2224 		return;
2225 	}
2226 
2227 	dep = dwc->eps[1];
2228 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2229 			false);
2230 	if (ret) {
2231 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2232 		return;
2233 	}
2234 
2235 	/*
2236 	 * Configure PHY via GUSB3PIPECTLn if required.
2237 	 *
2238 	 * Update GTXFIFOSIZn
2239 	 *
2240 	 * In both cases reset values should be sufficient.
2241 	 */
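	/*
	 * Illustrative sketch only (not executed): if a platform did need a
	 * pipe-control tweak at this point, the usual read-modify-write on
	 * GUSB3PIPECTL0 would look roughly like:
	 *
	 *	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
	 *	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
	 *	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
	 */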
2242 }
2243 
2244 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2245 {
2246 	/*
2247 	 * TODO take core out of low power mode when that's
2248 	 * implemented.
2249 	 */
2250 
2251 	dwc->gadget_driver->resume(&dwc->gadget);
2252 }
2253 
2254 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2255 		unsigned int evtinfo)
2256 {
2257 	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
2258 	unsigned int		pwropt;
2259 
2260 	/*
2261 	 * WORKAROUND: DWC3 revisions < 2.50a, when configured without
2262 	 * Hibernation mode enabled, have an issue which shows up when the
2263 	 * device detects a host-initiated U3 exit.
2264 	 *
2265 	 * In that case, device will generate a Link State Change Interrupt
2266 	 * from U3 to RESUME which is only necessary if Hibernation is
2267 	 * configured in.
2268 	 *
2269 	 * There are no functional changes due to such spurious event and we
2270 	 * just need to ignore it.
2271 	 *
2272 	 * Refers to:
2273 	 *
2274 	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2275 	 * operational mode
2276 	 */
2277 	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2278 	if ((dwc->revision < DWC3_REVISION_250A) &&
2279 			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2280 		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2281 				(next == DWC3_LINK_STATE_RESUME)) {
2282 			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2283 			return;
2284 		}
2285 	}
2286 
2287 	/*
2288 	 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
2289 	 * on the link partner, the USB session might go through multiple
2290 	 * low-power state entries and exits before a transfer takes place.
2291 	 *
2292 	 * Due to this problem, we might experience lower throughput. The
2293 	 * suggested workaround is to disable DCTL[12:9] bits if we're
2294 	 * transitioning from U1/U2 to U0 and enable those bits again
2295 	 * after a transfer completes and there are no pending transfers
2296 	 * on any of the enabled endpoints.
2297 	 *
2298 	 * This is the first half of that workaround.
2299 	 *
2300 	 * Refers to:
2301 	 *
2302 	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2303 	 * core send LGO_Ux entering U0
2304 	 */
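	/*
	 * The U1/U2 enable bits cleared below are remembered in dwc->u1u2 so
	 * that the second half of the workaround can restore them once a
	 * transfer completes.
	 */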
2305 	if (dwc->revision < DWC3_REVISION_183A) {
2306 		if (next == DWC3_LINK_STATE_U0) {
2307 			u32	u1u2;
2308 			u32	reg;
2309 
2310 			switch (dwc->link_state) {
2311 			case DWC3_LINK_STATE_U1:
2312 			case DWC3_LINK_STATE_U2:
2313 				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2314 				u1u2 = reg & (DWC3_DCTL_INITU2ENA
2315 						| DWC3_DCTL_ACCEPTU2ENA
2316 						| DWC3_DCTL_INITU1ENA
2317 						| DWC3_DCTL_ACCEPTU1ENA);
2318 
2319 				if (!dwc->u1u2)
2320 					dwc->u1u2 = reg & u1u2;
2321 
2322 				reg &= ~u1u2;
2323 
2324 				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2325 				break;
2326 			default:
2327 				/* do nothing */
2328 				break;
2329 			}
2330 		}
2331 	}
2332 
2333 	switch (next) {
2334 	case DWC3_LINK_STATE_U1:
2335 		if (dwc->speed == USB_SPEED_SUPER)
2336 			dwc3_suspend_gadget(dwc);
2337 		break;
2338 	case DWC3_LINK_STATE_U2:
2339 	case DWC3_LINK_STATE_U3:
2340 		dwc3_suspend_gadget(dwc);
2341 		break;
2342 	case DWC3_LINK_STATE_RESUME:
2343 		dwc3_resume_gadget(dwc);
2344 		break;
2345 	default:
2346 		/* do nothing */
2347 		break;
2348 	}
2349 
2350 	dwc->link_state = next;
2351 }
2352 
2353 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2354 		unsigned int evtinfo)
2355 {
2356 	unsigned int is_ss = evtinfo & (1UL << 4);
2357 
2358 	/*
2359 	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2360 	 * has a known issue which can cause USB CV TD.9.23 to fail
2361 	 * randomly.
2362 	 *
2363 	 * Because of this issue, core could generate bogus hibernation
2364 	 * events which SW needs to ignore.
2365 	 *
2366 	 * Refers to:
2367 	 *
2368 	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2369 	 * Device Fallback from SuperSpeed
2370 	 */
2371 	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2372 		return;
2373 
2374 	/* enter hibernation here */
2375 }
2376 
2377 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2378 		const struct dwc3_event_devt *event)
2379 {
2380 	switch (event->type) {
2381 	case DWC3_DEVICE_EVENT_DISCONNECT:
2382 		dwc3_gadget_disconnect_interrupt(dwc);
2383 		break;
2384 	case DWC3_DEVICE_EVENT_RESET:
2385 		dwc3_gadget_reset_interrupt(dwc);
2386 		break;
2387 	case DWC3_DEVICE_EVENT_CONNECT_DONE:
2388 		dwc3_gadget_conndone_interrupt(dwc);
2389 		break;
2390 	case DWC3_DEVICE_EVENT_WAKEUP:
2391 		dwc3_gadget_wakeup_interrupt(dwc);
2392 		break;
2393 	case DWC3_DEVICE_EVENT_HIBER_REQ:
2394 		if (!dwc->has_hibernation) {
2395 			WARN(1, "unexpected hibernation event\n");
2396 			break;
2397 		}
2398 		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2399 		break;
2400 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2401 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2402 		break;
2403 	case DWC3_DEVICE_EVENT_EOPF:
2404 		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2405 		break;
2406 	case DWC3_DEVICE_EVENT_SOF:
2407 		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2408 		break;
2409 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2410 		dev_vdbg(dwc->dev, "Erratic Error\n");
2411 		break;
2412 	case DWC3_DEVICE_EVENT_CMD_CMPL:
2413 		dev_vdbg(dwc->dev, "Command Complete\n");
2414 		break;
2415 	case DWC3_DEVICE_EVENT_OVERFLOW:
2416 		dev_vdbg(dwc->dev, "Overflow\n");
2417 		break;
2418 	default:
2419 		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2420 	}
2421 }
2422 
2423 static void dwc3_process_event_entry(struct dwc3 *dwc,
2424 		const union dwc3_event *event)
2425 {
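	/*
	 * Bit 0 of each event word distinguishes endpoint events (0) from
	 * device and other events (1); the union exposes it as
	 * type.is_devspec.
	 */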
2426 	/* Endpoint IRQ, handle it and return early */
2427 	if (event->type.is_devspec == 0) {
2428 		/* depevt */
2429 		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2430 	}
2431 
2432 	switch (event->type.type) {
2433 	case DWC3_EVENT_TYPE_DEV:
2434 		dwc3_gadget_interrupt(dwc, &event->devt);
2435 		break;
2436 	/* REVISIT what to do with Carkit and I2C events ? */
2437 	default:
2438 		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2439 	}
2440 }
2441 
2442 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2443 {
2444 	struct dwc3_event_buffer *evt;
2445 	irqreturn_t ret = IRQ_NONE;
2446 	int left;
2447 	u32 reg;
2448 
2449 	evt = dwc->ev_buffs[buf];
2450 	left = evt->count;
2451 
2452 	if (!(evt->flags & DWC3_EVENT_PENDING))
2453 		return IRQ_NONE;
2454 
2455 	while (left > 0) {
2456 		union dwc3_event event;
2457 
2458 		event.raw = *(u32 *) (evt->buf + evt->lpos);
2459 
2460 		dwc3_process_event_entry(dwc, &event);
2461 
2462 		/*
2463 		 * FIXME: wrapping to the next entry works because almost
2464 		 * all entries are 4 bytes in size. There is one entry type
2465 		 * that takes 12 bytes: a regular entry followed by 8 bytes
2466 		 * of data. It is currently unclear how such an entry is
2467 		 * laid out when it lands next to the buffer boundary, so
2468 		 * that case will have to be revisited once it needs to be
2469 		 * handled.
2470 		 */
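		/*
		 * Example: with a 4 KiB event buffer, lpos steps through
		 * 0, 4, 8, ..., 4092 and then wraps back to 0, while
		 * GEVNTCOUNT is acknowledged 4 bytes at a time below.
		 */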
2471 		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2472 		left -= 4;
2473 
2474 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2475 	}
2476 
2477 	evt->count = 0;
2478 	evt->flags &= ~DWC3_EVENT_PENDING;
2479 	ret = IRQ_HANDLED;
2480 
2481 	/* Unmask interrupt */
2482 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2483 	reg &= ~DWC3_GEVNTSIZ_INTMASK;
2484 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2485 
2486 	return ret;
2487 }
2488 
2489 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2490 {
2491 	struct dwc3 *dwc = _dwc;
2492 	unsigned long flags;
2493 	irqreturn_t ret = IRQ_NONE;
2494 	int i;
2495 
2496 	spin_lock_irqsave(&dwc->lock, flags);
2497 
2498 	for (i = 0; i < dwc->num_event_buffers; i++)
2499 		ret |= dwc3_process_event_buf(dwc, i);
2500 
2501 	spin_unlock_irqrestore(&dwc->lock, flags);
2502 
2503 	return ret;
2504 }
2505 
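/*
 * Hard-IRQ half of the handler: it only latches the pending event count and
 * masks the buffer's interrupt. The events are consumed (and the interrupt
 * unmasked again) later by dwc3_process_event_buf(), called from
 * dwc3_thread_interrupt().
 */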
2506 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2507 {
2508 	struct dwc3_event_buffer *evt;
2509 	u32 count;
2510 	u32 reg;
2511 
2512 	evt = dwc->ev_buffs[buf];
2513 
2514 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2515 	count &= DWC3_GEVNTCOUNT_MASK;
2516 	if (!count)
2517 		return IRQ_NONE;
2518 
2519 	evt->count = count;
2520 	evt->flags |= DWC3_EVENT_PENDING;
2521 
2522 	/* Mask interrupt */
2523 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2524 	reg |= DWC3_GEVNTSIZ_INTMASK;
2525 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2526 
2527 	return IRQ_WAKE_THREAD;
2528 }
2529 
2530 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2531 {
2532 	struct dwc3			*dwc = _dwc;
2533 	int				i;
2534 	irqreturn_t			ret = IRQ_NONE;
2535 
2536 	spin_lock(&dwc->lock);
2537 
2538 	for (i = 0; i < dwc->num_event_buffers; i++) {
2539 		irqreturn_t status;
2540 
2541 		status = dwc3_check_event_buf(dwc, i);
2542 		if (status == IRQ_WAKE_THREAD)
2543 			ret = status;
2544 	}
2545 
2546 	spin_unlock(&dwc->lock);
2547 
2548 	return ret;
2549 }
2550 
2551 /**
2552  * dwc3_gadget_init - Initializes gadget related registers
2553  * @dwc: pointer to our controller context structure
2554  *
2555  * Returns 0 on success, otherwise a negative errno.
2556  */
2557 int dwc3_gadget_init(struct dwc3 *dwc)
2558 {
2559 	int					ret;
2560 
2561 	dwc->ctrl_req = dma_alloc_coherent(sizeof(*dwc->ctrl_req),
2562 					(unsigned long *)&dwc->ctrl_req_addr);
2563 	if (!dwc->ctrl_req) {
2564 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2565 		ret = -ENOMEM;
2566 		goto err0;
2567 	}
2568 
2569 	dwc->ep0_trb = dma_alloc_coherent(sizeof(*dwc->ep0_trb) * 2,
2570 					  (unsigned long *)&dwc->ep0_trb_addr);
2571 	if (!dwc->ep0_trb) {
2572 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2573 		ret = -ENOMEM;
2574 		goto err1;
2575 	}
2576 
2577 	dwc->setup_buf = memalign(CONFIG_SYS_CACHELINE_SIZE,
2578 				  DWC3_EP0_BOUNCE_SIZE);
2579 	if (!dwc->setup_buf) {
2580 		ret = -ENOMEM;
2581 		goto err2;
2582 	}
2583 
2584 	dwc->ep0_bounce = dma_alloc_coherent(DWC3_EP0_BOUNCE_SIZE,
2585 					(unsigned long *)&dwc->ep0_bounce_addr);
2586 	if (!dwc->ep0_bounce) {
2587 		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2588 		ret = -ENOMEM;
2589 		goto err3;
2590 	}
2591 
2592 	dwc->gadget.ops			= &dwc3_gadget_ops;
2593 	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2594 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2595 	dwc->gadget.name		= "dwc3-gadget";
2596 
2597 	/*
2598 	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2599 	 * on ep out.
2600 	 */
2601 	dwc->gadget.quirk_ep_out_aligned_size = true;
2602 
2603 	/*
2604 	 * REVISIT: Here we should clear all pending IRQs to be
2605 	 * sure we're starting from a well known location.
2606 	 */
2607 
2608 	ret = dwc3_gadget_init_endpoints(dwc);
2609 	if (ret)
2610 		goto err4;
2611 
2612 	ret = usb_add_gadget_udc((struct device *)dwc->dev, &dwc->gadget);
2613 	if (ret) {
2614 		dev_err(dwc->dev, "failed to register udc\n");
2615 		goto err4;
2616 	}
2617 
2618 	return 0;
2619 
2620 err4:
2621 	dwc3_gadget_free_endpoints(dwc);
2622 	dma_free_coherent(dwc->ep0_bounce);
2623 
2624 err3:
2625 	kfree(dwc->setup_buf);
2626 
2627 err2:
2628 	dma_free_coherent(dwc->ep0_trb);
2629 
2630 err1:
2631 	dma_free_coherent(dwc->ctrl_req);
2632 
2633 err0:
2634 	return ret;
2635 }
2636 
2637 /* -------------------------------------------------------------------------- */
2638 
2639 void dwc3_gadget_exit(struct dwc3 *dwc)
2640 {
2641 	usb_del_gadget_udc(&dwc->gadget);
2642 
2643 	dwc3_gadget_free_endpoints(dwc);
2644 
2645 	dma_free_coherent(dwc->ep0_bounce);
2646 
2647 	kfree(dwc->setup_buf);
2648 
2649 	dma_free_coherent(dwc->ep0_trb);
2650 
2651 	dma_free_coherent(dwc->ctrl_req);
2652 }
2653 
2654 /**
2655  * dwc3_gadget_uboot_handle_interrupt - handle dwc3 gadget interrupt
2656  * @dwc: pointer to our controller context structure
2657  *
2658  * Handles ep0 and gadget interrupts
2659  *
2660  * Should be called from dwc3 core.
2661  */
2662 void dwc3_gadget_uboot_handle_interrupt(struct dwc3 *dwc)
2663 {
2664 	int ret = dwc3_interrupt(0, dwc);
2665 
2666 	if (ret == IRQ_WAKE_THREAD) {
2667 		int i;
2668 		struct dwc3_event_buffer *evt;
2669 
2670 		dwc3_thread_interrupt(0, dwc);
2671 
2672 		/* Clean + Invalidate the buffers after touching them */
2673 		for (i = 0; i < dwc->num_event_buffers; i++) {
2674 			evt = dwc->ev_buffs[i];
2675 			dwc3_flush_cache((uintptr_t)evt->buf, evt->length);
2676 		}
2677 	}
2678 }
2679
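/*
 * Illustrative usage only: a caller that already holds the struct dwc3
 * pointer (here assumed to be "dwc", e.g. saved at probe time) could poll
 * the controller from its main loop like:
 *
 *	while (!ctrlc())
 *		dwc3_gadget_uboot_handle_interrupt(dwc);
 */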