xref: /openbmc/linux/drivers/usb/dwc3/gadget.c (revision a2cce7a9)
1 /**
2  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3  *
4  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5  *
6  * Authors: Felipe Balbi <balbi@ti.com>,
7  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8  *
9  * This program is free software: you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 of
11  * the License as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29 
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32 
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37 
38 /**
39  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40  * @dwc: pointer to our context structure
41  * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
42  *
43  * Caller should take care of locking. This function will
44  * return 0 on success or -EINVAL if an invalid Test Selector
45  * is passed.
46  */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 	u32		reg;
50 
51 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53 
54 	switch (mode) {
55 	case TEST_J:
56 	case TEST_K:
57 	case TEST_SE0_NAK:
58 	case TEST_PACKET:
59 	case TEST_FORCE_EN:
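		/*
		 * The ch9.h TEST_* selector values map directly onto the
		 * DCTL TstCtl field (bits 4:1, per the mask cleared above),
		 * hence the shift by one below.
		 */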
60 		reg |= mode << 1;
61 		break;
62 	default:
63 		return -EINVAL;
64 	}
65 
66 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67 
68 	return 0;
69 }
70 
71 /**
72  * dwc3_gadget_get_link_state - Gets current state of USB Link
73  * @dwc: pointer to our context structure
74  *
75  * Caller should take care of locking. This function will
76  * return the link state on success (>= 0) or -ETIMEDOUT.
77  */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 	u32		reg;
81 
82 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83 
84 	return DWC3_DSTS_USBLNKST(reg);
85 }
86 
87 /**
88  * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89  * @dwc: pointer to our context structure
90  * @state: the state to put link into
91  *
92  * Caller should take care of locking. This function will
93  * return 0 on success or -ETIMEDOUT.
94  */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 	int		retries = 10000;
98 	u32		reg;
99 
100 	/*
101 	 * Wait until device controller is ready. Only applies to 1.94a and
102 	 * later RTL.
103 	 */
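	/*
	 * The loop below polls for the DWC3_DSTS_DCNRD bit to clear and
	 * budgets roughly 50ms in total (up to 10000 iterations of a 5us
	 * delay).
	 */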
104 	if (dwc->revision >= DWC3_REVISION_194A) {
105 		while (--retries) {
106 			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 			if (reg & DWC3_DSTS_DCNRD)
108 				udelay(5);
109 			else
110 				break;
111 		}
112 
113 		if (retries <= 0)
114 			return -ETIMEDOUT;
115 	}
116 
117 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119 
120 	/* set requested state */
121 	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123 
124 	/*
125 	 * The following code is racy when called from dwc3_gadget_wakeup,
126 	 * and is not needed, at least on newer versions
127 	 */
128 	if (dwc->revision >= DWC3_REVISION_194A)
129 		return 0;
130 
131 	/* wait for a change in DSTS */
132 	retries = 10000;
133 	while (--retries) {
134 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135 
136 		if (DWC3_DSTS_USBLNKST(reg) == state)
137 			return 0;
138 
139 		udelay(5);
140 	}
141 
142 	dwc3_trace(trace_dwc3_gadget,
143 			"link state change request timed out");
144 
145 	return -ETIMEDOUT;
146 }
147 
148 /**
149  * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
150  * @dwc: pointer to our context structure
151  *
152  * This function will make a best-effort FIFO allocation in order
153  * to improve FIFO usage and throughput, while still allowing
154  * us to enable as many endpoints as possible.
155  *
156  * Keep in mind that this operation will be highly dependent
157  * on the configured size for RAM1 - which contains the TxFIFO -,
158  * the number of endpoints enabled in the coreConsultant tool, and
159  * the width of the Master Bus.
160  *
161  * In the ideal world, we would always be able to satisfy the
162  * following equation:
163  *
164  * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
165  * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
166  *
167  * Unfortunately, due to many variables that's not always the case.
168  */
169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
170 {
171 	int		last_fifo_depth = 0;
172 	int		ram1_depth;
173 	int		fifo_size;
174 	int		mdwidth;
175 	int		num;
176 
177 	if (!dwc->needs_fifo_resize)
178 		return 0;
179 
180 	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
181 	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
182 
183 	/* MDWIDTH is represented in bits, we need it in bytes */
184 	mdwidth >>= 3;
185 
186 	/*
187 	 * FIXME For now we will only allocate 1 wMaxPacketSize space
188 	 * for each enabled endpoint; later patches will come to
189 	 * improve this algorithm so that we make better use of the
190 	 * internal FIFO space.
191 	 */
192 	for (num = 0; num < dwc->num_in_eps; num++) {
193 		/* bit0 indicates direction; 1 means IN ep */
194 		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
195 		int		mult = 1;
196 		int		tmp;
197 
198 		if (!(dep->flags & DWC3_EP_ENABLED))
199 			continue;
200 
201 		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
202 				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
203 			mult = 3;
204 
205 		/*
206 		 * REVISIT: the following assumes we will always have enough
207 		 * space available on the FIFO RAM for all possible use cases.
208 		 * Make sure that's true somehow and change FIFO allocation
209 		 * accordingly.
210 		 *
211 		 * If we have Bulk or Isochronous endpoints, we want
212 		 * them to be able to be very, very fast. So we're giving
213 		 * those endpoints a fifo_size which is enough for 3 full
214 		 * packets
215 		 */
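		/*
		 * Worked example (illustrative values): with a 64-bit master
		 * bus (mdwidth = 8 bytes) and a 1024-byte bulk maxpacket,
		 * tmp = 3 * (1024 + 8) + 8 = 3104 bytes, which rounds up to
		 * 388 mdwidth-words of FIFO space.
		 */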
216 		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
217 		tmp += mdwidth;
218 
219 		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
220 
221 		fifo_size |= (last_fifo_depth << 16);
222 
223 		dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
224 				dep->name, last_fifo_depth, fifo_size & 0xffff);
225 
226 		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
227 
228 		last_fifo_depth += (fifo_size & 0xffff);
229 	}
230 
231 	return 0;
232 }
233 
234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
235 		int status)
236 {
237 	struct dwc3			*dwc = dep->dwc;
238 	int				i;
239 
240 	if (req->queued) {
241 		i = 0;
242 		do {
243 			dep->busy_slot++;
244 			/*
245 			 * Skip LINK TRB. We can't use req->trb and check for
246 			 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
247 			 * just completed (not the LINK TRB).
248 			 */
249 			if (((dep->busy_slot & DWC3_TRB_MASK) ==
250 				DWC3_TRB_NUM - 1) &&
251 				usb_endpoint_xfer_isoc(dep->endpoint.desc))
252 				dep->busy_slot++;
253 		} while (++i < req->request.num_mapped_sgs);
254 		req->queued = false;
255 	}
256 	list_del(&req->list);
257 	req->trb = NULL;
258 
259 	if (req->request.status == -EINPROGRESS)
260 		req->request.status = status;
261 
262 	if (dwc->ep0_bounced && dep->number == 0)
263 		dwc->ep0_bounced = false;
264 	else
265 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
266 				req->direction);
267 
268 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
269 			req, dep->name, req->request.actual,
270 			req->request.length, status);
271 	trace_dwc3_gadget_giveback(req);
272 
273 	spin_unlock(&dwc->lock);
274 	usb_gadget_giveback_request(&dep->endpoint, &req->request);
275 	spin_lock(&dwc->lock);
276 }
277 
278 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
279 {
280 	u32		timeout = 500;
281 	u32		reg;
282 
283 	trace_dwc3_gadget_generic_cmd(cmd, param);
284 
285 	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
286 	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
287 
288 	do {
289 		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
290 		if (!(reg & DWC3_DGCMD_CMDACT)) {
291 			dwc3_trace(trace_dwc3_gadget,
292 					"Command Complete --> %d",
293 					DWC3_DGCMD_STATUS(reg));
294 			if (DWC3_DGCMD_STATUS(reg))
295 				return -EINVAL;
296 			return 0;
297 		}
298 
299 		/*
300 		 * We can't sleep here, because it's also called from
301 		 * interrupt context.
302 		 */
303 		timeout--;
304 		if (!timeout) {
305 			dwc3_trace(trace_dwc3_gadget,
306 					"Command Timed Out");
307 			return -ETIMEDOUT;
308 		}
309 		udelay(1);
310 	} while (1);
311 }
312 
313 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
314 		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
315 {
316 	struct dwc3_ep		*dep = dwc->eps[ep];
317 	u32			timeout = 500;
318 	u32			reg;
319 
320 	trace_dwc3_gadget_ep_cmd(dep, cmd, params);
321 
322 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
323 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
324 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
325 
326 	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
327 	do {
328 		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
329 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
330 			dwc3_trace(trace_dwc3_gadget,
331 					"Command Complete --> %d",
332 					DWC3_DEPCMD_STATUS(reg));
333 			if (DWC3_DEPCMD_STATUS(reg))
334 				return -EINVAL;
335 			return 0;
336 		}
337 
338 		/*
339 		 * We can't sleep here, because it is also called from
340 		 * interrupt context.
341 		 */
342 		timeout--;
343 		if (!timeout) {
344 			dwc3_trace(trace_dwc3_gadget,
345 					"Command Timed Out");
346 			return -ETIMEDOUT;
347 		}
348 
349 		udelay(1);
350 	} while (1);
351 }
352 
353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
354 		struct dwc3_trb *trb)
355 {
356 	u32		offset = (char *) trb - (char *) dep->trb_pool;
357 
358 	return dep->trb_pool_dma + offset;
359 }
360 
361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
362 {
363 	struct dwc3		*dwc = dep->dwc;
364 
365 	if (dep->trb_pool)
366 		return 0;
367 
368 	dep->trb_pool = dma_alloc_coherent(dwc->dev,
369 			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
370 			&dep->trb_pool_dma, GFP_KERNEL);
371 	if (!dep->trb_pool) {
372 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
373 				dep->name);
374 		return -ENOMEM;
375 	}
376 
377 	return 0;
378 }
379 
380 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
381 {
382 	struct dwc3		*dwc = dep->dwc;
383 
384 	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
385 			dep->trb_pool, dep->trb_pool_dma);
386 
387 	dep->trb_pool = NULL;
388 	dep->trb_pool_dma = 0;
389 }
390 
391 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
392 {
393 	struct dwc3_gadget_ep_cmd_params params;
394 	u32			cmd;
395 
396 	memset(&params, 0x00, sizeof(params));
397 
398 	if (dep->number != 1) {
399 		cmd = DWC3_DEPCMD_DEPSTARTCFG;
400 		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
401 		if (dep->number > 1) {
402 			if (dwc->start_config_issued)
403 				return 0;
404 			dwc->start_config_issued = true;
405 			cmd |= DWC3_DEPCMD_PARAM(2);
406 		}
407 
408 		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
409 	}
410 
411 	return 0;
412 }
413 
414 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
415 		const struct usb_endpoint_descriptor *desc,
416 		const struct usb_ss_ep_comp_descriptor *comp_desc,
417 		bool ignore, bool restore)
418 {
419 	struct dwc3_gadget_ep_cmd_params params;
420 
421 	memset(&params, 0x00, sizeof(params));
422 
423 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
424 		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
425 
426 	/* Burst size is only needed in SuperSpeed mode */
427 	if (dwc->gadget.speed == USB_SPEED_SUPER) {
428 		u32 burst = dep->endpoint.maxburst - 1;
429 
430 		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
431 	}
432 
433 	if (ignore)
434 		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
435 
436 	if (restore) {
437 		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
438 		params.param2 |= dep->saved_state;
439 	}
440 
441 	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
442 		| DWC3_DEPCFG_XFER_NOT_READY_EN;
443 
444 	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
445 		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
446 			| DWC3_DEPCFG_STREAM_EVENT_EN;
447 		dep->stream_capable = true;
448 	}
449 
450 	if (!usb_endpoint_xfer_control(desc))
451 		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
452 
453 	/*
454 	 * We are doing 1:1 mapping for endpoints, meaning
455 	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
456 	 * so on. We consider the direction bit as part of the physical
457 	 * endpoint number. So USB endpoint 0x81 is 0x03.
458 	 */
459 	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
460 
461 	/*
462 	 * We must use the lower 16 TX FIFOs even though
463 	 * HW might have more
464 	 */
465 	if (dep->direction)
466 		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
467 
468 	if (desc->bInterval) {
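		/*
		 * For high-/super-speed periodic endpoints bInterval is an
		 * exponent: the service interval is 2^(bInterval - 1)
		 * (micro)frames. The hardware takes bInterval - 1, while the
		 * decoded interval is cached for the isoc start logic.
		 */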
469 		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
470 		dep->interval = 1 << (desc->bInterval - 1);
471 	}
472 
473 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
474 			DWC3_DEPCMD_SETEPCONFIG, &params);
475 }
476 
477 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
478 {
479 	struct dwc3_gadget_ep_cmd_params params;
480 
481 	memset(&params, 0x00, sizeof(params));
482 
483 	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
484 
485 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
486 			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
487 }
488 
489 /**
490  * __dwc3_gadget_ep_enable - Initializes a HW endpoint
491  * @dep: endpoint to be initialized
492  * @desc: USB Endpoint Descriptor
493  *
494  * Caller should take care of locking
495  */
496 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
497 		const struct usb_endpoint_descriptor *desc,
498 		const struct usb_ss_ep_comp_descriptor *comp_desc,
499 		bool ignore, bool restore)
500 {
501 	struct dwc3		*dwc = dep->dwc;
502 	u32			reg;
503 	int			ret;
504 
505 	dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
506 
507 	if (!(dep->flags & DWC3_EP_ENABLED)) {
508 		ret = dwc3_gadget_start_config(dwc, dep);
509 		if (ret)
510 			return ret;
511 	}
512 
513 	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
514 			restore);
515 	if (ret)
516 		return ret;
517 
518 	if (!(dep->flags & DWC3_EP_ENABLED)) {
519 		struct dwc3_trb	*trb_st_hw;
520 		struct dwc3_trb	*trb_link;
521 
522 		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
523 		if (ret)
524 			return ret;
525 
526 		dep->endpoint.desc = desc;
527 		dep->comp_desc = comp_desc;
528 		dep->type = usb_endpoint_type(desc);
529 		dep->flags |= DWC3_EP_ENABLED;
530 
531 		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
532 		reg |= DWC3_DALEPENA_EP(dep->number);
533 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
534 
535 		if (!usb_endpoint_xfer_isoc(desc))
536 			return 0;
537 
538 		/* Link TRB for ISOC. The HWO bit is never reset */
539 		trb_st_hw = &dep->trb_pool[0];
540 
541 		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
542 		memset(trb_link, 0, sizeof(*trb_link));
543 
544 		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
545 		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
546 		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
547 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
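
		/*
		 * The link TRB in the last slot points back at slot 0, turning
		 * the TRB pool into a ring for isochronous endpoints.
		 */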
548 	}
549 
550 	switch (usb_endpoint_type(desc)) {
551 	case USB_ENDPOINT_XFER_CONTROL:
552 		strlcat(dep->name, "-control", sizeof(dep->name));
553 		break;
554 	case USB_ENDPOINT_XFER_ISOC:
555 		strlcat(dep->name, "-isoc", sizeof(dep->name));
556 		break;
557 	case USB_ENDPOINT_XFER_BULK:
558 		strlcat(dep->name, "-bulk", sizeof(dep->name));
559 		break;
560 	case USB_ENDPOINT_XFER_INT:
561 		strlcat(dep->name, "-int", sizeof(dep->name));
562 		break;
563 	default:
564 		dev_err(dwc->dev, "invalid endpoint transfer type\n");
565 	}
566 
567 	return 0;
568 }
569 
570 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
571 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
572 {
573 	struct dwc3_request		*req;
574 
575 	if (!list_empty(&dep->req_queued)) {
576 		dwc3_stop_active_transfer(dwc, dep->number, true);
577 
578 		/* giveback all requests to the gadget driver */
579 		while (!list_empty(&dep->req_queued)) {
580 			req = next_request(&dep->req_queued);
581 
582 			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
583 		}
584 	}
585 
586 	while (!list_empty(&dep->request_list)) {
587 		req = next_request(&dep->request_list);
588 
589 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
590 	}
591 }
592 
593 /**
594  * __dwc3_gadget_ep_disable - Disables a HW endpoint
595  * @dep: the endpoint to disable
596  *
597  * This function also removes requests which are currently being processed by the
598  * hardware and those which are not yet scheduled.
599  * Caller should take care of locking.
600  */
601 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
602 {
603 	struct dwc3		*dwc = dep->dwc;
604 	u32			reg;
605 
606 	dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
607 
608 	dwc3_remove_requests(dwc, dep);
609 
610 	/* make sure HW endpoint isn't stalled */
611 	if (dep->flags & DWC3_EP_STALL)
612 		__dwc3_gadget_ep_set_halt(dep, 0, false);
613 
614 	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
615 	reg &= ~DWC3_DALEPENA_EP(dep->number);
616 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
617 
618 	dep->stream_capable = false;
619 	dep->endpoint.desc = NULL;
620 	dep->comp_desc = NULL;
621 	dep->type = 0;
622 	dep->flags = 0;
623 
624 	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
625 			dep->number >> 1,
626 			(dep->number & 1) ? "in" : "out");
627 
628 	return 0;
629 }
630 
631 /* -------------------------------------------------------------------------- */
632 
633 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
634 		const struct usb_endpoint_descriptor *desc)
635 {
636 	return -EINVAL;
637 }
638 
639 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
640 {
641 	return -EINVAL;
642 }
643 
644 /* -------------------------------------------------------------------------- */
645 
646 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
647 		const struct usb_endpoint_descriptor *desc)
648 {
649 	struct dwc3_ep			*dep;
650 	struct dwc3			*dwc;
651 	unsigned long			flags;
652 	int				ret;
653 
654 	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
655 		pr_debug("dwc3: invalid parameters\n");
656 		return -EINVAL;
657 	}
658 
659 	if (!desc->wMaxPacketSize) {
660 		pr_debug("dwc3: missing wMaxPacketSize\n");
661 		return -EINVAL;
662 	}
663 
664 	dep = to_dwc3_ep(ep);
665 	dwc = dep->dwc;
666 
667 	if (dep->flags & DWC3_EP_ENABLED) {
668 		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
669 				dep->name);
670 		return 0;
671 	}
672 
673 	spin_lock_irqsave(&dwc->lock, flags);
674 	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
675 	spin_unlock_irqrestore(&dwc->lock, flags);
676 
677 	return ret;
678 }
679 
680 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
681 {
682 	struct dwc3_ep			*dep;
683 	struct dwc3			*dwc;
684 	unsigned long			flags;
685 	int				ret;
686 
687 	if (!ep) {
688 		pr_debug("dwc3: invalid parameters\n");
689 		return -EINVAL;
690 	}
691 
692 	dep = to_dwc3_ep(ep);
693 	dwc = dep->dwc;
694 
695 	if (!(dep->flags & DWC3_EP_ENABLED)) {
696 		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
697 				dep->name);
698 		return 0;
699 	}
700 
701 	spin_lock_irqsave(&dwc->lock, flags);
702 	ret = __dwc3_gadget_ep_disable(dep);
703 	spin_unlock_irqrestore(&dwc->lock, flags);
704 
705 	return ret;
706 }
707 
708 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
709 	gfp_t gfp_flags)
710 {
711 	struct dwc3_request		*req;
712 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
713 
714 	req = kzalloc(sizeof(*req), gfp_flags);
715 	if (!req)
716 		return NULL;
717 
718 	req->epnum	= dep->number;
719 	req->dep	= dep;
720 
721 	trace_dwc3_alloc_request(req);
722 
723 	return &req->request;
724 }
725 
726 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
727 		struct usb_request *request)
728 {
729 	struct dwc3_request		*req = to_dwc3_request(request);
730 
731 	trace_dwc3_free_request(req);
732 	kfree(req);
733 }
734 
735 /**
736  * dwc3_prepare_one_trb - setup one TRB from one request
737  * @dep: endpoint for which this request is prepared
738  * @req: dwc3_request pointer
739  */
740 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
741 		struct dwc3_request *req, dma_addr_t dma,
742 		unsigned length, unsigned last, unsigned chain, unsigned node)
743 {
744 	struct dwc3_trb		*trb;
745 
746 	dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
747 			dep->name, req, (unsigned long long) dma,
748 			length, last ? " last" : "",
749 			chain ? " chain" : "");
750 
751 
752 	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
753 
754 	if (!req->trb) {
755 		dwc3_gadget_move_request_queued(req);
756 		req->trb = trb;
757 		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
758 		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
759 	}
760 
761 	dep->free_slot++;
762 	/* Skip the LINK-TRB on ISOC */
763 	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
764 			usb_endpoint_xfer_isoc(dep->endpoint.desc))
765 		dep->free_slot++;
766 
767 	trb->size = DWC3_TRB_SIZE_LENGTH(length);
768 	trb->bpl = lower_32_bits(dma);
769 	trb->bph = upper_32_bits(dma);
770 
771 	switch (usb_endpoint_type(dep->endpoint.desc)) {
772 	case USB_ENDPOINT_XFER_CONTROL:
773 		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
774 		break;
775 
776 	case USB_ENDPOINT_XFER_ISOC:
777 		if (!node)
778 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
779 		else
780 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
781 		break;
782 
783 	case USB_ENDPOINT_XFER_BULK:
784 	case USB_ENDPOINT_XFER_INT:
785 		trb->ctrl = DWC3_TRBCTL_NORMAL;
786 		break;
787 	default:
788 		/*
789 		 * This is only possible with faulty memory because we
790 		 * checked it already :)
791 		 */
792 		BUG();
793 	}
794 
795 	if (!req->request.no_interrupt && !chain)
796 		trb->ctrl |= DWC3_TRB_CTRL_IOC;
797 
798 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
799 		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
800 		trb->ctrl |= DWC3_TRB_CTRL_CSP;
801 	} else if (last) {
802 		trb->ctrl |= DWC3_TRB_CTRL_LST;
803 	}
804 
805 	if (chain)
806 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
807 
808 	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
809 		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
810 
811 	trb->ctrl |= DWC3_TRB_CTRL_HWO;
812 
813 	trace_dwc3_prepare_trb(dep, trb);
814 }
815 
816 /*
817  * dwc3_prepare_trbs - setup TRBs from requests
818  * @dep: endpoint for which requests are being prepared
819  * @starting: true if the endpoint is idle and no requests are queued.
820  *
821  * The function goes through the requests list and sets up TRBs for the
822  * transfers. The function returns once there are no more TRBs available or
823  * it runs out of requests.
824  */
825 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
826 {
827 	struct dwc3_request	*req, *n;
828 	u32			trbs_left;
829 	u32			max;
830 	unsigned int		last_one = 0;
831 
832 	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
833 
834 	/* the first request must not be queued */
835 	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
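	/*
	 * free_slot is the producer index and busy_slot the consumer index;
	 * as DWC3_TRB_NUM is a power of two, the masked difference gives the
	 * number of TRB slots still available in the ring (a result of 0 is
	 * ambiguous between full and empty, see below).
	 */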
836 
837 	/* Can't wrap around on a non-isoc EP since there's no link TRB */
838 	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
839 		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
840 		if (trbs_left > max)
841 			trbs_left = max;
842 	}
843 
844 	/*
845 	 * If the busy and free slots are equal then the ring is either
846 	 * full or empty. If we are starting to process requests then it
847 	 * is empty. Otherwise it is full and we don't do anything.
848 	 */
849 	if (!trbs_left) {
850 		if (!starting)
851 			return;
852 		trbs_left = DWC3_TRB_NUM;
853 		/*
854 		 * In case we start from scratch, we queue the ISOC requests
855 		 * starting from slot 1. This is done because we use ring
856 		 * buffer and have no LST bit to stop us. Instead, we place
857 		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
858 		 * after the first request so we start at slot 1 and have
859 		 * 7 requests proceed before we hit the first IOC.
860 		 * Other transfer types don't use the ring buffer and are
861 		 * processed from the first TRB until the last one. Since we
862 		 * don't wrap around we have to start at the beginning.
863 		 */
864 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
865 			dep->busy_slot = 1;
866 			dep->free_slot = 1;
867 		} else {
868 			dep->busy_slot = 0;
869 			dep->free_slot = 0;
870 		}
871 	}
872 
873 	/* The last TRB is a link TRB, not used for xfer */
874 	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
875 		return;
876 
877 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
878 		unsigned	length;
879 		dma_addr_t	dma;
880 		last_one = false;
881 
882 		if (req->request.num_mapped_sgs > 0) {
883 			struct usb_request *request = &req->request;
884 			struct scatterlist *sg = request->sg;
885 			struct scatterlist *s;
886 			int		i;
887 
888 			for_each_sg(sg, s, request->num_mapped_sgs, i) {
889 				unsigned chain = true;
890 
891 				length = sg_dma_len(s);
892 				dma = sg_dma_address(s);
893 
894 				if (i == (request->num_mapped_sgs - 1) ||
895 						sg_is_last(s)) {
896 					if (list_empty(&dep->request_list))
897 						last_one = true;
898 					chain = false;
899 				}
900 
901 				trbs_left--;
902 				if (!trbs_left)
903 					last_one = true;
904 
905 				if (last_one)
906 					chain = false;
907 
908 				dwc3_prepare_one_trb(dep, req, dma, length,
909 						last_one, chain, i);
910 
911 				if (last_one)
912 					break;
913 			}
914 
915 			if (last_one)
916 				break;
917 		} else {
918 			dma = req->request.dma;
919 			length = req->request.length;
920 			trbs_left--;
921 
922 			if (!trbs_left)
923 				last_one = 1;
924 
925 			/* Is this the last request? */
926 			if (list_is_last(&req->list, &dep->request_list))
927 				last_one = 1;
928 
929 			dwc3_prepare_one_trb(dep, req, dma, length,
930 					last_one, false, 0);
931 
932 			if (last_one)
933 				break;
934 		}
935 	}
936 }
937 
938 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
939 		int start_new)
940 {
941 	struct dwc3_gadget_ep_cmd_params params;
942 	struct dwc3_request		*req;
943 	struct dwc3			*dwc = dep->dwc;
944 	int				ret;
945 	u32				cmd;
946 
947 	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
948 		dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
949 		return -EBUSY;
950 	}
951 	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
952 
953 	/*
954 	 * If we are getting here after a short-out-packet we don't enqueue any
955 	 * new requests as we try to set the IOC bit only on the last request.
956 	 */
957 	if (start_new) {
958 		if (list_empty(&dep->req_queued))
959 			dwc3_prepare_trbs(dep, start_new);
960 
961 		/* req points to the first request which will be sent */
962 		req = next_request(&dep->req_queued);
963 	} else {
964 		dwc3_prepare_trbs(dep, start_new);
965 
966 		/*
967 		 * req points to the first request where HWO changed from 0 to 1
968 		 */
969 		req = next_request(&dep->req_queued);
970 	}
971 	if (!req) {
972 		dep->flags |= DWC3_EP_PENDING_REQUEST;
973 		return 0;
974 	}
975 
976 	memset(&params, 0, sizeof(params));
977 
978 	if (start_new) {
979 		params.param0 = upper_32_bits(req->trb_dma);
980 		params.param1 = lower_32_bits(req->trb_dma);
981 		cmd = DWC3_DEPCMD_STARTTRANSFER;
982 	} else {
983 		cmd = DWC3_DEPCMD_UPDATETRANSFER;
984 	}
985 
986 	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
987 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
988 	if (ret < 0) {
989 		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
990 
991 		/*
992 		 * FIXME we need to iterate over the list of requests
993 		 * here and stop, unmap, free and del each of the linked
994 		 * requests instead of what we do now.
995 		 */
996 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
997 				req->direction);
998 		list_del(&req->list);
999 		return ret;
1000 	}
1001 
1002 	dep->flags |= DWC3_EP_BUSY;
1003 
1004 	if (start_new) {
1005 		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
1006 				dep->number);
1007 		WARN_ON_ONCE(!dep->resource_index);
1008 	}
1009 
1010 	return 0;
1011 }
1012 
1013 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1014 		struct dwc3_ep *dep, u32 cur_uf)
1015 {
1016 	u32 uf;
1017 
1018 	if (list_empty(&dep->request_list)) {
1019 		dwc3_trace(trace_dwc3_gadget,
1020 				"ISOC ep %s ran out of requests",
1021 				dep->name);
1022 		dep->flags |= DWC3_EP_PENDING_REQUEST;
1023 		return;
1024 	}
1025 
1026 	/* 4 microframes in the future */
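	/*
	 * Starting a few intervals ahead of the current microframe presumably
	 * leaves the controller enough time to fetch the TRBs before the
	 * target (micro)frame arrives.
	 */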
1027 	uf = cur_uf + dep->interval * 4;
1028 
1029 	__dwc3_gadget_kick_transfer(dep, uf, 1);
1030 }
1031 
1032 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1033 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1034 {
1035 	u32 cur_uf, mask;
1036 
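	/*
	 * For isochronous endpoints the XferNotReady event reports the
	 * current (micro)frame number in its parameters field; masking with
	 * ~(interval - 1) rounds it down to a multiple of the endpoint's
	 * service interval.
	 */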
1037 	mask = ~(dep->interval - 1);
1038 	cur_uf = event->parameters & mask;
1039 
1040 	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1041 }
1042 
1043 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1044 {
1045 	struct dwc3		*dwc = dep->dwc;
1046 	int			ret;
1047 
1048 	req->request.actual	= 0;
1049 	req->request.status	= -EINPROGRESS;
1050 	req->direction		= dep->direction;
1051 	req->epnum		= dep->number;
1052 
1053 	/*
1054 	 * We only add to our list of requests now and
1055 	 * start consuming the list once we get XferNotReady
1056 	 * IRQ.
1057 	 *
1058 	 * That way, we avoid doing anything that we don't need
1059 	 * to do now and defer it until the point we receive a
1060 	 * particular token from the Host side.
1061 	 *
1062 	 * This will also avoid Host cancelling URBs due to too
1063 	 * many NAKs.
1064 	 */
1065 	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1066 			dep->direction);
1067 	if (ret)
1068 		return ret;
1069 
1070 	list_add_tail(&req->list, &dep->request_list);
1071 
1072 	/*
1073 	 * There are a few special cases:
1074 	 *
1075 	 * 1. XferNotReady with empty list of requests. We need to kick the
1076 	 *    transfer here in that situation, otherwise we will be NAKing
1077 	 *    forever. If we get XferNotReady before gadget driver has a
1078 	 *    chance to queue a request, we will ACK the IRQ but won't be
1079 	 *    able to receive the data until the next request is queued.
1080 	 *    The following code is handling exactly that.
1081 	 *
1082 	 */
1083 	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1084 		/*
1085 		 * If xfernotready has already elapsed and this is an
1086 		 * isoc transfer, then issue END TRANSFER, so that we
1087 		 * can receive xfernotready again and regain a notion
1088 		 * of the current microframe.
1089 		 */
1090 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1091 			if (list_empty(&dep->req_queued)) {
1092 				dwc3_stop_active_transfer(dwc, dep->number, true);
1093 				dep->flags = DWC3_EP_ENABLED;
1094 			}
1095 			return 0;
1096 		}
1097 
1098 		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1099 		if (ret && ret != -EBUSY)
1100 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1101 					dep->name);
1102 		return ret;
1103 	}
1104 
1105 	/*
1106 	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1107 	 *    kick the transfer here after queuing a request, otherwise the
1108 	 *    core may not see the modified TRB(s).
1109 	 */
1110 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1111 			(dep->flags & DWC3_EP_BUSY) &&
1112 			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
1113 		WARN_ON_ONCE(!dep->resource_index);
1114 		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1115 				false);
1116 		if (ret && ret != -EBUSY)
1117 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1118 					dep->name);
1119 		return ret;
1120 	}
1121 
1122 	/*
1123 	 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1124 	 * right away, otherwise the host will not know we have streams to be
1125 	 * handled.
1126 	 */
1127 	if (dep->stream_capable) {
1128 		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1129 		if (ret && ret != -EBUSY)
1130 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1131 					dep->name);
1132 	}
1133 
1134 	return 0;
1135 }
1136 
1137 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1138 	gfp_t gfp_flags)
1139 {
1140 	struct dwc3_request		*req = to_dwc3_request(request);
1141 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1142 	struct dwc3			*dwc = dep->dwc;
1143 
1144 	unsigned long			flags;
1145 
1146 	int				ret;
1147 
1148 	spin_lock_irqsave(&dwc->lock, flags);
1149 	if (!dep->endpoint.desc) {
1150 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1151 				request, ep->name);
1152 		ret = -ESHUTDOWN;
1153 		goto out;
1154 	}
1155 
1156 	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1157 				request, req->dep->name)) {
1158 		ret = -EINVAL;
1159 		goto out;
1160 	}
1161 
1162 	trace_dwc3_ep_queue(req);
1163 
1164 	ret = __dwc3_gadget_ep_queue(dep, req);
1165 
1166 out:
1167 	spin_unlock_irqrestore(&dwc->lock, flags);
1168 
1169 	return ret;
1170 }
1171 
1172 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1173 		struct usb_request *request)
1174 {
1175 	struct dwc3_request		*req = to_dwc3_request(request);
1176 	struct dwc3_request		*r = NULL;
1177 
1178 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1179 	struct dwc3			*dwc = dep->dwc;
1180 
1181 	unsigned long			flags;
1182 	int				ret = 0;
1183 
1184 	trace_dwc3_ep_dequeue(req);
1185 
1186 	spin_lock_irqsave(&dwc->lock, flags);
1187 
1188 	list_for_each_entry(r, &dep->request_list, list) {
1189 		if (r == req)
1190 			break;
1191 	}
1192 
1193 	if (r != req) {
1194 		list_for_each_entry(r, &dep->req_queued, list) {
1195 			if (r == req)
1196 				break;
1197 		}
1198 		if (r == req) {
1199 			/* wait until it is processed */
1200 			dwc3_stop_active_transfer(dwc, dep->number, true);
1201 			goto out1;
1202 		}
1203 		dev_err(dwc->dev, "request %p was not queued to %s\n",
1204 				request, ep->name);
1205 		ret = -EINVAL;
1206 		goto out0;
1207 	}
1208 
1209 out1:
1210 	/* giveback the request */
1211 	dwc3_gadget_giveback(dep, req, -ECONNRESET);
1212 
1213 out0:
1214 	spin_unlock_irqrestore(&dwc->lock, flags);
1215 
1216 	return ret;
1217 }
1218 
1219 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1220 {
1221 	struct dwc3_gadget_ep_cmd_params	params;
1222 	struct dwc3				*dwc = dep->dwc;
1223 	int					ret;
1224 
1225 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1226 		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1227 		return -EINVAL;
1228 	}
1229 
1230 	memset(&params, 0x00, sizeof(params));
1231 
1232 	if (value) {
1233 		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1234 				(!list_empty(&dep->req_queued) ||
1235 				 !list_empty(&dep->request_list)))) {
1236 			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1237 					dep->name);
1238 			return -EAGAIN;
1239 		}
1240 
1241 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1242 			DWC3_DEPCMD_SETSTALL, &params);
1243 		if (ret)
1244 			dev_err(dwc->dev, "failed to set STALL on %s\n",
1245 					dep->name);
1246 		else
1247 			dep->flags |= DWC3_EP_STALL;
1248 	} else {
1249 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1250 			DWC3_DEPCMD_CLEARSTALL, &params);
1251 		if (ret)
1252 			dev_err(dwc->dev, "failed to clear STALL on %s\n",
1253 					dep->name);
1254 		else
1255 			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1256 	}
1257 
1258 	return ret;
1259 }
1260 
1261 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1262 {
1263 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1264 	struct dwc3			*dwc = dep->dwc;
1265 
1266 	unsigned long			flags;
1267 
1268 	int				ret;
1269 
1270 	spin_lock_irqsave(&dwc->lock, flags);
1271 	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1272 	spin_unlock_irqrestore(&dwc->lock, flags);
1273 
1274 	return ret;
1275 }
1276 
1277 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1278 {
1279 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1280 	struct dwc3			*dwc = dep->dwc;
1281 	unsigned long			flags;
1282 	int				ret;
1283 
1284 	spin_lock_irqsave(&dwc->lock, flags);
1285 	dep->flags |= DWC3_EP_WEDGE;
1286 
1287 	if (dep->number == 0 || dep->number == 1)
1288 		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1289 	else
1290 		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1291 	spin_unlock_irqrestore(&dwc->lock, flags);
1292 
1293 	return ret;
1294 }
1295 
1296 /* -------------------------------------------------------------------------- */
1297 
1298 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1299 	.bLength	= USB_DT_ENDPOINT_SIZE,
1300 	.bDescriptorType = USB_DT_ENDPOINT,
1301 	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
1302 };
1303 
1304 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1305 	.enable		= dwc3_gadget_ep0_enable,
1306 	.disable	= dwc3_gadget_ep0_disable,
1307 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1308 	.free_request	= dwc3_gadget_ep_free_request,
1309 	.queue		= dwc3_gadget_ep0_queue,
1310 	.dequeue	= dwc3_gadget_ep_dequeue,
1311 	.set_halt	= dwc3_gadget_ep0_set_halt,
1312 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1313 };
1314 
1315 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1316 	.enable		= dwc3_gadget_ep_enable,
1317 	.disable	= dwc3_gadget_ep_disable,
1318 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1319 	.free_request	= dwc3_gadget_ep_free_request,
1320 	.queue		= dwc3_gadget_ep_queue,
1321 	.dequeue	= dwc3_gadget_ep_dequeue,
1322 	.set_halt	= dwc3_gadget_ep_set_halt,
1323 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1324 };
1325 
1326 /* -------------------------------------------------------------------------- */
1327 
1328 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1329 {
1330 	struct dwc3		*dwc = gadget_to_dwc(g);
1331 	u32			reg;
1332 
1333 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1334 	return DWC3_DSTS_SOFFN(reg);
1335 }
1336 
1337 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1338 {
1339 	struct dwc3		*dwc = gadget_to_dwc(g);
1340 
1341 	unsigned long		timeout;
1342 	unsigned long		flags;
1343 
1344 	u32			reg;
1345 
1346 	int			ret = 0;
1347 
1348 	u8			link_state;
1349 	u8			speed;
1350 
1351 	spin_lock_irqsave(&dwc->lock, flags);
1352 
1353 	/*
1354 	 * According to the Databook, a remote wakeup request should
1355 	 * be issued only when the device is in the early suspend state.
1356 	 *
1357 	 * We can check that via USB Link State bits in DSTS register.
1358 	 */
1359 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1360 
1361 	speed = reg & DWC3_DSTS_CONNECTSPD;
1362 	if (speed == DWC3_DSTS_SUPERSPEED) {
1363 		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1364 		ret = -EINVAL;
1365 		goto out;
1366 	}
1367 
1368 	link_state = DWC3_DSTS_USBLNKST(reg);
1369 
1370 	switch (link_state) {
1371 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1372 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1373 		break;
1374 	default:
1375 		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1376 				link_state);
1377 		ret = -EINVAL;
1378 		goto out;
1379 	}
1380 
1381 	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1382 	if (ret < 0) {
1383 		dev_err(dwc->dev, "failed to put link in Recovery\n");
1384 		goto out;
1385 	}
1386 
1387 	/* Recent versions do this automatically */
1388 	if (dwc->revision < DWC3_REVISION_194A) {
1389 		/* write zeroes to Link Change Request */
1390 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1391 		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1392 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1393 	}
1394 
1395 	/* poll until Link State changes to ON */
1396 	timeout = jiffies + msecs_to_jiffies(100);
1397 
1398 	while (!time_after(jiffies, timeout)) {
1399 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1400 
1401 		/* in HS, means ON */
1402 		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1403 			break;
1404 	}
1405 
1406 	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1407 		dev_err(dwc->dev, "failed to send remote wakeup\n");
1408 		ret = -EINVAL;
1409 	}
1410 
1411 out:
1412 	spin_unlock_irqrestore(&dwc->lock, flags);
1413 
1414 	return ret;
1415 }
1416 
1417 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1418 		int is_selfpowered)
1419 {
1420 	struct dwc3		*dwc = gadget_to_dwc(g);
1421 	unsigned long		flags;
1422 
1423 	spin_lock_irqsave(&dwc->lock, flags);
1424 	g->is_selfpowered = !!is_selfpowered;
1425 	spin_unlock_irqrestore(&dwc->lock, flags);
1426 
1427 	return 0;
1428 }
1429 
1430 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1431 {
1432 	u32			reg;
1433 	u32			timeout = 500;
1434 
1435 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1436 	if (is_on) {
1437 		if (dwc->revision <= DWC3_REVISION_187A) {
1438 			reg &= ~DWC3_DCTL_TRGTULST_MASK;
1439 			reg |= DWC3_DCTL_TRGTULST_RX_DET;
1440 		}
1441 
1442 		if (dwc->revision >= DWC3_REVISION_194A)
1443 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1444 		reg |= DWC3_DCTL_RUN_STOP;
1445 
1446 		if (dwc->has_hibernation)
1447 			reg |= DWC3_DCTL_KEEP_CONNECT;
1448 
1449 		dwc->pullups_connected = true;
1450 	} else {
1451 		reg &= ~DWC3_DCTL_RUN_STOP;
1452 
1453 		if (dwc->has_hibernation && !suspend)
1454 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1455 
1456 		dwc->pullups_connected = false;
1457 	}
1458 
1459 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1460 
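	/*
	 * DWC3_DSTS_DEVCTRLHLT reads 1 while the device controller is halted,
	 * so starting waits for the bit to clear and stopping waits for it to
	 * be set; the loop below gives up after roughly 500us.
	 */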
1461 	do {
1462 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1463 		if (is_on) {
1464 			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1465 				break;
1466 		} else {
1467 			if (reg & DWC3_DSTS_DEVCTRLHLT)
1468 				break;
1469 		}
1470 		timeout--;
1471 		if (!timeout)
1472 			return -ETIMEDOUT;
1473 		udelay(1);
1474 	} while (1);
1475 
1476 	dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1477 			dwc->gadget_driver
1478 			? dwc->gadget_driver->function : "no-function",
1479 			is_on ? "connect" : "disconnect");
1480 
1481 	return 0;
1482 }
1483 
1484 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1485 {
1486 	struct dwc3		*dwc = gadget_to_dwc(g);
1487 	unsigned long		flags;
1488 	int			ret;
1489 
1490 	is_on = !!is_on;
1491 
1492 	spin_lock_irqsave(&dwc->lock, flags);
1493 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
1494 	spin_unlock_irqrestore(&dwc->lock, flags);
1495 
1496 	return ret;
1497 }
1498 
1499 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1500 {
1501 	u32			reg;
1502 
1503 	/* Enable all but Start and End of Frame IRQs */
1504 	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1505 			DWC3_DEVTEN_EVNTOVERFLOWEN |
1506 			DWC3_DEVTEN_CMDCMPLTEN |
1507 			DWC3_DEVTEN_ERRTICERREN |
1508 			DWC3_DEVTEN_WKUPEVTEN |
1509 			DWC3_DEVTEN_ULSTCNGEN |
1510 			DWC3_DEVTEN_CONNECTDONEEN |
1511 			DWC3_DEVTEN_USBRSTEN |
1512 			DWC3_DEVTEN_DISCONNEVTEN);
1513 
1514 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1515 }
1516 
1517 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1518 {
1519 	/* mask all interrupts */
1520 	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1521 }
1522 
1523 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1524 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1525 
1526 static int dwc3_gadget_start(struct usb_gadget *g,
1527 		struct usb_gadget_driver *driver)
1528 {
1529 	struct dwc3		*dwc = gadget_to_dwc(g);
1530 	struct dwc3_ep		*dep;
1531 	unsigned long		flags;
1532 	int			ret = 0;
1533 	int			irq;
1534 	u32			reg;
1535 
1536 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1537 	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1538 			IRQF_SHARED, "dwc3", dwc);
1539 	if (ret) {
1540 		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1541 				irq, ret);
1542 		goto err0;
1543 	}
1544 
1545 	spin_lock_irqsave(&dwc->lock, flags);
1546 
1547 	if (dwc->gadget_driver) {
1548 		dev_err(dwc->dev, "%s is already bound to %s\n",
1549 				dwc->gadget.name,
1550 				dwc->gadget_driver->driver.name);
1551 		ret = -EBUSY;
1552 		goto err1;
1553 	}
1554 
1555 	dwc->gadget_driver	= driver;
1556 
1557 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1558 	reg &= ~(DWC3_DCFG_SPEED_MASK);
1559 
1560 	/**
1561 	 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1562 	 * which would cause a metastable state on the Run/Stop
1563 	 * bit if we try to force the IP into USB2-only mode.
1564 	 *
1565 	 * Because of that, we cannot configure the IP to any
1566 	 * speed other than SuperSpeed.
1567 	 *
1568 	 * Refers to:
1569 	 *
1570 	 * STAR#9000525659: Clock Domain Crossing on DCTL in
1571 	 * USB 2.0 Mode
1572 	 */
1573 	if (dwc->revision < DWC3_REVISION_220A) {
1574 		reg |= DWC3_DCFG_SUPERSPEED;
1575 	} else {
1576 		switch (dwc->maximum_speed) {
1577 		case USB_SPEED_LOW:
1578 			reg |= DWC3_DSTS_LOWSPEED;
1579 			break;
1580 		case USB_SPEED_FULL:
1581 			reg |= DWC3_DSTS_FULLSPEED1;
1582 			break;
1583 		case USB_SPEED_HIGH:
1584 			reg |= DWC3_DSTS_HIGHSPEED;
1585 			break;
1586 		case USB_SPEED_SUPER:	/* FALLTHROUGH */
1587 		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
1588 		default:
1589 			reg |= DWC3_DSTS_SUPERSPEED;
1590 		}
1591 	}
1592 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1593 
1594 	dwc->start_config_issued = false;
1595 
1596 	/* Start with SuperSpeed Default */
1597 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
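	/*
	 * 512 bytes is the fixed ep0 maxpacket for SuperSpeed; if the link
	 * comes up at a lower speed this is presumably adjusted once
	 * ConnectDone reports the negotiated speed (handled elsewhere in the
	 * driver).
	 */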
1598 
1599 	dep = dwc->eps[0];
1600 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1601 			false);
1602 	if (ret) {
1603 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1604 		goto err2;
1605 	}
1606 
1607 	dep = dwc->eps[1];
1608 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1609 			false);
1610 	if (ret) {
1611 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1612 		goto err3;
1613 	}
1614 
1615 	/* begin to receive SETUP packets */
1616 	dwc->ep0state = EP0_SETUP_PHASE;
1617 	dwc3_ep0_out_start(dwc);
1618 
1619 	dwc3_gadget_enable_irq(dwc);
1620 
1621 	spin_unlock_irqrestore(&dwc->lock, flags);
1622 
1623 	return 0;
1624 
1625 err3:
1626 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1627 
1628 err2:
1629 	dwc->gadget_driver = NULL;
1630 
1631 err1:
1632 	spin_unlock_irqrestore(&dwc->lock, flags);
1633 
1634 	free_irq(irq, dwc);
1635 
1636 err0:
1637 	return ret;
1638 }
1639 
1640 static int dwc3_gadget_stop(struct usb_gadget *g)
1641 {
1642 	struct dwc3		*dwc = gadget_to_dwc(g);
1643 	unsigned long		flags;
1644 	int			irq;
1645 
1646 	spin_lock_irqsave(&dwc->lock, flags);
1647 
1648 	dwc3_gadget_disable_irq(dwc);
1649 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1650 	__dwc3_gadget_ep_disable(dwc->eps[1]);
1651 
1652 	dwc->gadget_driver	= NULL;
1653 
1654 	spin_unlock_irqrestore(&dwc->lock, flags);
1655 
1656 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1657 	free_irq(irq, dwc);
1658 
1659 	return 0;
1660 }
1661 
1662 static const struct usb_gadget_ops dwc3_gadget_ops = {
1663 	.get_frame		= dwc3_gadget_get_frame,
1664 	.wakeup			= dwc3_gadget_wakeup,
1665 	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1666 	.pullup			= dwc3_gadget_pullup,
1667 	.udc_start		= dwc3_gadget_start,
1668 	.udc_stop		= dwc3_gadget_stop,
1669 };
1670 
1671 /* -------------------------------------------------------------------------- */
1672 
1673 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1674 		u8 num, u32 direction)
1675 {
1676 	struct dwc3_ep			*dep;
1677 	u8				i;
1678 
1679 	for (i = 0; i < num; i++) {
1680 		u8 epnum = (i << 1) | (!!direction);
1681 
1682 		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1683 		if (!dep)
1684 			return -ENOMEM;
1685 
1686 		dep->dwc = dwc;
1687 		dep->number = epnum;
1688 		dep->direction = !!direction;
1689 		dwc->eps[epnum] = dep;
1690 
1691 		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1692 				(epnum & 1) ? "in" : "out");
1693 
1694 		dep->endpoint.name = dep->name;
1695 
1696 		dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1697 
1698 		if (epnum == 0 || epnum == 1) {
1699 			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1700 			dep->endpoint.maxburst = 1;
1701 			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1702 			if (!epnum)
1703 				dwc->gadget.ep0 = &dep->endpoint;
1704 		} else {
1705 			int		ret;
1706 
1707 			usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1708 			dep->endpoint.max_streams = 15;
1709 			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1710 			list_add_tail(&dep->endpoint.ep_list,
1711 					&dwc->gadget.ep_list);
1712 
1713 			ret = dwc3_alloc_trb_pool(dep);
1714 			if (ret)
1715 				return ret;
1716 		}
1717 
1718 		if (epnum == 0 || epnum == 1) {
1719 			dep->endpoint.caps.type_control = true;
1720 		} else {
1721 			dep->endpoint.caps.type_iso = true;
1722 			dep->endpoint.caps.type_bulk = true;
1723 			dep->endpoint.caps.type_int = true;
1724 		}
1725 
1726 		dep->endpoint.caps.dir_in = !!direction;
1727 		dep->endpoint.caps.dir_out = !direction;
1728 
1729 		INIT_LIST_HEAD(&dep->request_list);
1730 		INIT_LIST_HEAD(&dep->req_queued);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1737 {
1738 	int				ret;
1739 
1740 	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1741 
1742 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1743 	if (ret < 0) {
1744 		dwc3_trace(trace_dwc3_gadget,
1745 				"failed to allocate OUT endpoints");
1746 		return ret;
1747 	}
1748 
1749 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1750 	if (ret < 0) {
1751 		dwc3_trace(trace_dwc3_gadget,
1752 				"failed to allocate IN endpoints");
1753 		return ret;
1754 	}
1755 
1756 	return 0;
1757 }
1758 
1759 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1760 {
1761 	struct dwc3_ep			*dep;
1762 	u8				epnum;
1763 
1764 	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1765 		dep = dwc->eps[epnum];
1766 		if (!dep)
1767 			continue;
1768 		/*
1769 		 * Physical endpoints 0 and 1 are special; they form the
1770 		 * bi-directional USB endpoint 0.
1771 		 *
1772 		 * For those two physical endpoints, we don't allocate a TRB
1773 		 * pool nor do we add them to the endpoint list. Because of that,
1774 		 * we shouldn't perform those two operations here either, otherwise
1775 		 * we would end up with all sorts of bugs when removing dwc3.ko.
1776 		 */
1777 		if (epnum != 0 && epnum != 1) {
1778 			dwc3_free_trb_pool(dep);
1779 			list_del(&dep->endpoint.ep_list);
1780 		}
1781 
1782 		kfree(dep);
1783 	}
1784 }
1785 
1786 /* -------------------------------------------------------------------------- */
1787 
1788 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1789 		struct dwc3_request *req, struct dwc3_trb *trb,
1790 		const struct dwc3_event_depevt *event, int status)
1791 {
1792 	unsigned int		count;
1793 	unsigned int		s_pkt = 0;
1794 	unsigned int		trb_status;
1795 
1796 	trace_dwc3_complete_trb(dep, trb);
1797 
1798 	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1799 		/*
1800 		 * We continue despite the error. There is not much we
1801 		 * can do. If we don't clean it up we loop forever. If
1802 		 * we skip the TRB then it gets overwritten after a
1803 		 * while since we use them in a ring buffer. A BUG()
1804 		 * would help. Lets hope that if this occurs, someone
1805 		 * would help. Let's hope that if this occurs, someone
1806 		 */
1807 		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1808 				dep->name, trb);
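
	/*
	 * On completion the controller writes the residue back into the TRB
	 * size field, so 'count' below is the number of bytes that were NOT
	 * transferred for this TRB.
	 */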
1809 	count = trb->size & DWC3_TRB_SIZE_MASK;
1810 
1811 	if (dep->direction) {
1812 		if (count) {
1813 			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1814 			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1815 				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1816 						dep->name);
1817 				/*
1818 				 * If a missed isoc occurred and there is
1819 				 * no request queued, then issue END
1820 				 * TRANSFER, so that the core generates
1821 				 * the next xfernotready and we will issue
1822 				 * a fresh START TRANSFER.
1823 				 * If there are still queued requests,
1824 				 * then wait; do not issue either END
1825 				 * or UPDATE TRANSFER, just attach the
1826 				 * next request in request_list during
1827 				 * giveback. If any future queued request
1828 				 * is successfully transferred then we
1829 				 * will issue UPDATE TRANSFER for all
1830 				 * requests in the request_list.
1831 				 */
1832 				dep->flags |= DWC3_EP_MISSED_ISOC;
1833 			} else {
1834 				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1835 						dep->name);
1836 				status = -ECONNRESET;
1837 			}
1838 		} else {
1839 			dep->flags &= ~DWC3_EP_MISSED_ISOC;
1840 		}
1841 	} else {
1842 		if (count && (event->status & DEPEVT_STATUS_SHORT))
1843 			s_pkt = 1;
1844 	}
1845 
1846 	/*
1847 	 * We assume here we will always receive the entire data block
1848 	 * which we should receive. Meaning, if we program RX to
1849 	 * receive 4K but we receive only 2K, we assume that's all we
1850 	 * should receive and we simply bounce the request back to the
1851 	 * gadget driver for further processing.
1852 	 */
1853 	req->request.actual += req->request.length - count;
1854 	if (s_pkt)
1855 		return 1;
1856 	if ((event->status & DEPEVT_STATUS_LST) &&
1857 			(trb->ctrl & (DWC3_TRB_CTRL_LST |
1858 				DWC3_TRB_CTRL_HWO)))
1859 		return 1;
1860 	if ((event->status & DEPEVT_STATUS_IOC) &&
1861 			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1862 		return 1;
1863 	return 0;
1864 }
1865 
1866 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1867 		const struct dwc3_event_depevt *event, int status)
1868 {
1869 	struct dwc3_request	*req;
1870 	struct dwc3_trb		*trb;
1871 	unsigned int		slot;
1872 	unsigned int		i;
1873 	int			ret;
1874 
1875 	req = next_request(&dep->req_queued);
1876 	if (!req) {
1877 		WARN_ON_ONCE(1);
1878 		return 1;
1879 	}
1880 	i = 0;
1881 	do {
1882 		slot = req->start_slot + i;
1883 		if ((slot == DWC3_TRB_NUM - 1) &&
1884 				usb_endpoint_xfer_isoc(dep->endpoint.desc))
1885 			slot++;
1886 		slot %= DWC3_TRB_NUM;
1887 		trb = &dep->trb_pool[slot];
1888 
1889 		ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1890 				event, status);
1891 		if (ret)
1892 			break;
1893 	} while (++i < req->request.num_mapped_sgs);
1894 
1895 	dwc3_gadget_giveback(dep, req, status);
1896 
1897 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1898 			list_empty(&dep->req_queued)) {
1899 		if (list_empty(&dep->request_list)) {
1900 			/*
1901 			 * If there is no entry in request list then do
1902 			 * not issue END TRANSFER now. Just set PENDING
1903 			 * flag, so that END TRANSFER is issued when an
1904 			 * entry is added into request list.
1905 			 */
1906 			dep->flags = DWC3_EP_PENDING_REQUEST;
1907 		} else {
1908 			dwc3_stop_active_transfer(dwc, dep->number, true);
1909 			dep->flags = DWC3_EP_ENABLED;
1910 		}
1911 		return 1;
1912 	}
1913 
1914 	return 1;
1915 }
1916 
1917 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1918 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1919 {
1920 	unsigned		status = 0;
1921 	int			clean_busy;
1922 	u32			is_xfer_complete;
1923 
1924 	is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
1925 
1926 	if (event->status & DEPEVT_STATUS_BUSERR)
1927 		status = -ECONNRESET;
1928 
1929 	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1930 	if (clean_busy && (is_xfer_complete ||
1931 				usb_endpoint_xfer_isoc(dep->endpoint.desc)))
1932 		dep->flags &= ~DWC3_EP_BUSY;
1933 
1934 	/*
1935 	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1936 	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1937 	 */
1938 	if (dwc->revision < DWC3_REVISION_183A) {
1939 		u32		reg;
1940 		int		i;
1941 
1942 		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1943 			dep = dwc->eps[i];
1944 
1945 			if (!(dep->flags & DWC3_EP_ENABLED))
1946 				continue;
1947 
1948 			if (!list_empty(&dep->req_queued))
1949 				return;
1950 		}
1951 
1952 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1953 		reg |= dwc->u1u2;
1954 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1955 
1956 		dwc->u1u2 = 0;
1957 	}
1958 }
1959 
1960 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1961 		const struct dwc3_event_depevt *event)
1962 {
1963 	struct dwc3_ep		*dep;
1964 	u8			epnum = event->endpoint_number;
1965 
1966 	dep = dwc->eps[epnum];
1967 
1968 	if (!(dep->flags & DWC3_EP_ENABLED))
1969 		return;
1970 
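	/*
	 * Editor's note: physical endpoints 0 and 1 back the control
	 * endpoint (ep0) and are routed to the dedicated ep0 handler.
	 */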
1971 	if (epnum == 0 || epnum == 1) {
1972 		dwc3_ep0_interrupt(dwc, event);
1973 		return;
1974 	}
1975 
1976 	switch (event->endpoint_event) {
1977 	case DWC3_DEPEVT_XFERCOMPLETE:
1978 		dep->resource_index = 0;
1979 
1980 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1981 			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1982 					dep->name);
1983 			return;
1984 		}
1985 
1986 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1987 		break;
1988 	case DWC3_DEPEVT_XFERINPROGRESS:
1989 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1990 		break;
1991 	case DWC3_DEPEVT_XFERNOTREADY:
1992 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1993 			dwc3_gadget_start_isoc(dwc, dep, event);
1994 		} else {
1995 			int ret;
1996 
1997 			dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
1998 					dep->name, event->status &
1999 					DEPEVT_STATUS_TRANSFER_ACTIVE
2000 					? "Transfer Active"
2001 					: "Transfer Not Active");
2002 
2003 			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
2004 			if (!ret || ret == -EBUSY)
2005 				return;
2006 
2007 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
2008 					dep->name);
2009 		}
2010 
2011 		break;
2012 	case DWC3_DEPEVT_STREAMEVT:
2013 		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2014 			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2015 					dep->name);
2016 			return;
2017 		}
2018 
2019 		switch (event->status) {
2020 		case DEPEVT_STREAMEVT_FOUND:
2021 			dwc3_trace(trace_dwc3_gadget,
2022 					"Stream %d found and started",
2023 					event->parameters);
2024 
2025 			break;
2026 		case DEPEVT_STREAMEVT_NOTFOUND:
2027 			/* FALLTHROUGH */
2028 		default:
2029 			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2030 		}
2031 		break;
2032 	case DWC3_DEPEVT_RXTXFIFOEVT:
2033 		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2034 		break;
2035 	case DWC3_DEPEVT_EPCMDCMPLT:
2036 		dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2037 		break;
2038 	}
2039 }
2040 
2041 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2042 {
2043 	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2044 		spin_unlock(&dwc->lock);
2045 		dwc->gadget_driver->disconnect(&dwc->gadget);
2046 		spin_lock(&dwc->lock);
2047 	}
2048 }
2049 
2050 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2051 {
2052 	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2053 		spin_unlock(&dwc->lock);
2054 		dwc->gadget_driver->suspend(&dwc->gadget);
2055 		spin_lock(&dwc->lock);
2056 	}
2057 }
2058 
2059 static void dwc3_resume_gadget(struct dwc3 *dwc)
2060 {
2061 	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2062 		spin_unlock(&dwc->lock);
2063 		dwc->gadget_driver->resume(&dwc->gadget);
2064 		spin_lock(&dwc->lock);
2065 	}
2066 }
2067 
2068 static void dwc3_reset_gadget(struct dwc3 *dwc)
2069 {
2070 	if (!dwc->gadget_driver)
2071 		return;
2072 
2073 	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2074 		spin_unlock(&dwc->lock);
2075 		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2076 		spin_lock(&dwc->lock);
2077 	}
2078 }
2079 
2080 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2081 {
2082 	struct dwc3_ep *dep;
2083 	struct dwc3_gadget_ep_cmd_params params;
2084 	u32 cmd;
2085 	int ret;
2086 
2087 	dep = dwc->eps[epnum];
2088 
2089 	if (!dep->resource_index)
2090 		return;
2091 
2092 	/*
2093 	 * NOTICE: We are violating what the Databook says about the
2094 	 * EndTransfer command. Ideally we would _always_ wait for the
2095 	 * EndTransfer Command Completion IRQ, but that's causing too
2096 	 * much trouble synchronizing between us and the gadget driver.
2097 	 *
2098 	 * We have discussed this with the IP Provider and it was
2099 	 * suggested to giveback all requests here, but give HW some
2100 	 * extra time to synchronize with the interconnect. We're using
2101 	 * an arbitrary 100us delay for that.
2102 	 *
2103 	 * Note also that a similar handling was tested by Synopsys
2104 	 * (thanks a lot Paul) and nothing bad has come out of it.
2105 	 * In short, what we're doing is:
2106 	 *
2107 	 * - Issue EndTransfer WITH CMDIOC bit set
2108 	 * - Wait 100us
2109 	 */
2110 
2111 	cmd = DWC3_DEPCMD_ENDTRANSFER;
2112 	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2113 	cmd |= DWC3_DEPCMD_CMDIOC;
2114 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2115 	memset(&params, 0, sizeof(params));
2116 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2117 	WARN_ON_ONCE(ret);
2118 	dep->resource_index = 0;
2119 	dep->flags &= ~DWC3_EP_BUSY;
2120 	udelay(100);
2121 }
2122 
2123 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2124 {
2125 	u32 epnum;
2126 
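	/*
	 * Editor's note: the loop starts at physical endpoint 2 because
	 * endpoints 0 and 1 back ep0 and are handled separately.
	 */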
2127 	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2128 		struct dwc3_ep *dep;
2129 
2130 		dep = dwc->eps[epnum];
2131 		if (!dep)
2132 			continue;
2133 
2134 		if (!(dep->flags & DWC3_EP_ENABLED))
2135 			continue;
2136 
2137 		dwc3_remove_requests(dwc, dep);
2138 	}
2139 }
2140 
2141 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2142 {
2143 	u32 epnum;
2144 
2145 	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2146 		struct dwc3_ep *dep;
2147 		struct dwc3_gadget_ep_cmd_params params;
2148 		int ret;
2149 
2150 		dep = dwc->eps[epnum];
2151 		if (!dep)
2152 			continue;
2153 
2154 		if (!(dep->flags & DWC3_EP_STALL))
2155 			continue;
2156 
2157 		dep->flags &= ~DWC3_EP_STALL;
2158 
2159 		memset(&params, 0, sizeof(params));
2160 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2161 				DWC3_DEPCMD_CLEARSTALL, &params);
2162 		WARN_ON_ONCE(ret);
2163 	}
2164 }
2165 
2166 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2167 {
2168 	int			reg;
2169 
2170 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2171 	reg &= ~DWC3_DCTL_INITU1ENA;
2172 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2173 
2174 	reg &= ~DWC3_DCTL_INITU2ENA;
2175 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2176 
2177 	dwc3_disconnect_gadget(dwc);
2178 	dwc->start_config_issued = false;
2179 
2180 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
2181 	dwc->setup_packet_pending = false;
2182 	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2183 }
2184 
2185 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2186 {
2187 	u32			reg;
2188 
2189 	/*
2190 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2191 	 * would cause a missing Disconnect Event if there's a
2192 	 * pending Setup Packet in the FIFO.
2193 	 *
2194 	 * There's no suggested workaround on the official Bug
2195 	 * report, which states that "unless the driver/application
2196 	 * is doing any special handling of a disconnect event,
2197 	 * there is no functional issue".
2198 	 *
2199 	 * Unfortunately, it turns out that we _do_ some special
2200 	 * handling of a disconnect event, namely complete all
2201 	 * pending transfers, notify gadget driver of the
2202 	 * disconnection, and so on.
2203 	 *
2204 	 * Our suggested workaround is to follow the Disconnect
2205 	 * Event steps here, instead, based on a setup_packet_pending
2206 	 * flag. Such flag gets set whenever we have a XferNotReady
2207 	 * flag. That flag gets set whenever there is an XferNotReady
2208 	 * same endpoint.
2209 	 *
2210 	 * Refers to:
2211 	 *
2212 	 * STAR#9000466709: RTL: Device : Disconnect event not
2213 	 * generated if setup packet pending in FIFO
2214 	 */
2215 	if (dwc->revision < DWC3_REVISION_188A) {
2216 		if (dwc->setup_packet_pending)
2217 			dwc3_gadget_disconnect_interrupt(dwc);
2218 	}
2219 
2220 	dwc3_reset_gadget(dwc);
2221 
2222 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2223 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2224 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2225 	dwc->test_mode = false;
2226 
2227 	dwc3_stop_active_transfers(dwc);
2228 	dwc3_clear_stall_all_ep(dwc);
2229 	dwc->start_config_issued = false;
2230 
2231 	/* Reset device address to zero */
2232 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2233 	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2234 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2235 }
2236 
2237 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2238 {
2239 	u32 reg;
2240 	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2241 
2242 	/*
2243 	 * We change the clock only at SuperSpeed, although it is not obvious
2244 	 * why this is needed. It may become part of the power saving plan.
2245 	 */
2246 
2247 	if (speed != DWC3_DSTS_SUPERSPEED)
2248 		return;
2249 
2250 	/*
2251 	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2252 	 * each time on Connect Done.
2253 	 */
2254 	if (!usb30_clock)
2255 		return;
2256 
2257 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2258 	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2259 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2260 }
2261 
2262 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2263 {
2264 	struct dwc3_ep		*dep;
2265 	int			ret;
2266 	u32			reg;
2267 	u8			speed;
2268 
2269 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2270 	speed = reg & DWC3_DSTS_CONNECTSPD;
2271 	dwc->speed = speed;
2272 
2273 	dwc3_update_ram_clk_sel(dwc, speed);
2274 
2275 	switch (speed) {
2276 	case DWC3_DCFG_SUPERSPEED:
2277 		/*
2278 		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2279 		 * would cause a missing USB3 Reset event.
2280 		 *
2281 		 * In such situations, we should force a USB3 Reset
2282 		 * event by calling our dwc3_gadget_reset_interrupt()
2283 		 * routine.
2284 		 *
2285 		 * Refers to:
2286 		 *
2287 		 * STAR#9000483510: RTL: SS : USB3 reset event may
2288 		 * not be generated always when the link enters poll
2289 		 */
2290 		if (dwc->revision < DWC3_REVISION_190A)
2291 			dwc3_gadget_reset_interrupt(dwc);
2292 
2293 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2294 		dwc->gadget.ep0->maxpacket = 512;
2295 		dwc->gadget.speed = USB_SPEED_SUPER;
2296 		break;
2297 	case DWC3_DCFG_HIGHSPEED:
2298 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2299 		dwc->gadget.ep0->maxpacket = 64;
2300 		dwc->gadget.speed = USB_SPEED_HIGH;
2301 		break;
2302 	case DWC3_DCFG_FULLSPEED2:
2303 	case DWC3_DCFG_FULLSPEED1:
2304 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2305 		dwc->gadget.ep0->maxpacket = 64;
2306 		dwc->gadget.speed = USB_SPEED_FULL;
2307 		break;
2308 	case DWC3_DCFG_LOWSPEED:
2309 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2310 		dwc->gadget.ep0->maxpacket = 8;
2311 		dwc->gadget.speed = USB_SPEED_LOW;
2312 		break;
2313 	}
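	/*
	 * Editor's summary of the switch above: SuperSpeed uses a 512 byte
	 * ep0 maxpacket, High Speed and Full Speed use 64, Low Speed uses 8.
	 */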
2314 
2315 	/* Enable USB2 LPM Capability */
2316 
2317 	if ((dwc->revision > DWC3_REVISION_194A)
2318 			&& (speed != DWC3_DCFG_SUPERSPEED)) {
2319 		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2320 		reg |= DWC3_DCFG_LPM_CAP;
2321 		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2322 
2323 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2324 		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2325 
2326 		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2327 
2328 		/*
2329 		 * On dwc3 revisions >= 2.40a, when the LPM Erratum is enabled
2330 		 * and DCFG.LPMCap is set, the core responds with an ACK if the
2331 		 * BESL value in the LPM token is less than or equal to the LPM
2332 		 * NYET threshold.
2333 		 */
2334 		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2335 				&& dwc->has_lpm_erratum,
2336 				"LPM Erratum not available on dwc3 revisions < 2.40a\n");
2337 
2338 		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2339 			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2340 
2341 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2342 	} else {
2343 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2344 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2345 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2346 	}
2347 
2348 	dep = dwc->eps[0];
2349 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2350 			false);
2351 	if (ret) {
2352 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2353 		return;
2354 	}
2355 
2356 	dep = dwc->eps[1];
2357 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2358 			false);
2359 	if (ret) {
2360 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2361 		return;
2362 	}
2363 
2364 	/*
2365 	 * Configure PHY via GUSB3PIPECTLn if required.
2366 	 *
2367 	 * Update GTXFIFOSIZn
2368 	 *
2369 	 * In both cases reset values should be sufficient.
2370 	 */
2371 }
2372 
2373 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2374 {
2375 	/*
2376 	 * TODO take core out of low power mode when that's
2377 	 * implemented.
2378 	 */
2379 
2380 	dwc->gadget_driver->resume(&dwc->gadget);
2381 }
2382 
2383 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2384 		unsigned int evtinfo)
2385 {
2386 	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
2387 	unsigned int		pwropt;
2388 
2389 	/*
2390 	 * WORKAROUND: DWC3 revisions < 2.50a, when configured without
2391 	 * Hibernation mode enabled, have an issue which shows up when the
2392 	 * device detects a host-initiated U3 exit.
2393 	 *
2394 	 * In that case, the device will generate a Link State Change
2395 	 * Interrupt from U3 to RESUME, which is only necessary when
2396 	 * Hibernation is configured in.
2397 	 *
2398 	 * There are no functional changes due to such a spurious event; we
2399 	 * just need to ignore it.
2400 	 *
2401 	 * Refers to:
2402 	 *
2403 	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2404 	 * operational mode
2405 	 */
2406 	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2407 	if ((dwc->revision < DWC3_REVISION_250A) &&
2408 			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2409 		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2410 				(next == DWC3_LINK_STATE_RESUME)) {
2411 			dwc3_trace(trace_dwc3_gadget,
2412 					"ignoring transition U3 -> Resume");
2413 			return;
2414 		}
2415 	}
2416 
2417 	/*
2418 	 * WORKAROUND: DWC3 revisions < 1.83a have an issue where, depending
2419 	 * on the link partner, the USB session might do multiple entries and
2420 	 * exits of low power states before a transfer takes place.
2421 	 *
2422 	 * Due to this problem, we might experience lower throughput. The
2423 	 * suggested workaround is to disable DCTL[12:9] bits if we're
2424 	 * transitioning from U1/U2 to U0 and enable those bits again
2425 	 * after a transfer completes and there are no pending transfers
2426 	 * on any of the enabled endpoints.
2427 	 *
2428 	 * This is the first half of that workaround.
2429 	 *
2430 	 * Refers to:
2431 	 *
2432 	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2433 	 * core send LGO_Ux entering U0
2434 	 */
2435 	if (dwc->revision < DWC3_REVISION_183A) {
2436 		if (next == DWC3_LINK_STATE_U0) {
2437 			u32	u1u2;
2438 			u32	reg;
2439 
2440 			switch (dwc->link_state) {
2441 			case DWC3_LINK_STATE_U1:
2442 			case DWC3_LINK_STATE_U2:
2443 				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2444 				u1u2 = reg & (DWC3_DCTL_INITU2ENA
2445 						| DWC3_DCTL_ACCEPTU2ENA
2446 						| DWC3_DCTL_INITU1ENA
2447 						| DWC3_DCTL_ACCEPTU1ENA);
2448 
2449 				if (!dwc->u1u2)
2450 					dwc->u1u2 = reg & u1u2;
2451 
2452 				reg &= ~u1u2;
2453 
2454 				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2455 				break;
2456 			default:
2457 				/* do nothing */
2458 				break;
2459 			}
2460 		}
2461 	}
2462 
2463 	switch (next) {
2464 	case DWC3_LINK_STATE_U1:
2465 		if (dwc->speed == USB_SPEED_SUPER)
2466 			dwc3_suspend_gadget(dwc);
2467 		break;
2468 	case DWC3_LINK_STATE_U2:
2469 	case DWC3_LINK_STATE_U3:
2470 		dwc3_suspend_gadget(dwc);
2471 		break;
2472 	case DWC3_LINK_STATE_RESUME:
2473 		dwc3_resume_gadget(dwc);
2474 		break;
2475 	default:
2476 		/* do nothing */
2477 		break;
2478 	}
2479 
2480 	dwc->link_state = next;
2481 }
2482 
2483 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2484 		unsigned int evtinfo)
2485 {
2486 	unsigned int is_ss = evtinfo & BIT(4);
2487 
2488 	/**
2489 	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2490 	 * has a known issue which can cause USB CV TD.9.23 to fail
2491 	 * randomly.
2492 	 *
2493 	 * Because of this issue, the core could generate bogus hibernation
2494 	 * events which SW needs to ignore.
2495 	 *
2496 	 * Refers to:
2497 	 *
2498 	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2499 	 * Device Fallback from SuperSpeed
2500 	 */
2501 	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2502 		return;
2503 
2504 	/* enter hibernation here */
2505 }
2506 
2507 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2508 		const struct dwc3_event_devt *event)
2509 {
2510 	switch (event->type) {
2511 	case DWC3_DEVICE_EVENT_DISCONNECT:
2512 		dwc3_gadget_disconnect_interrupt(dwc);
2513 		break;
2514 	case DWC3_DEVICE_EVENT_RESET:
2515 		dwc3_gadget_reset_interrupt(dwc);
2516 		break;
2517 	case DWC3_DEVICE_EVENT_CONNECT_DONE:
2518 		dwc3_gadget_conndone_interrupt(dwc);
2519 		break;
2520 	case DWC3_DEVICE_EVENT_WAKEUP:
2521 		dwc3_gadget_wakeup_interrupt(dwc);
2522 		break;
2523 	case DWC3_DEVICE_EVENT_HIBER_REQ:
2524 		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2525 					"unexpected hibernation event\n"))
2526 			break;
2527 
2528 		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2529 		break;
2530 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2531 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2532 		break;
2533 	case DWC3_DEVICE_EVENT_EOPF:
2534 		dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2535 		break;
2536 	case DWC3_DEVICE_EVENT_SOF:
2537 		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2538 		break;
2539 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2540 		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2541 		break;
2542 	case DWC3_DEVICE_EVENT_CMD_CMPL:
2543 		dwc3_trace(trace_dwc3_gadget, "Command Complete");
2544 		break;
2545 	case DWC3_DEVICE_EVENT_OVERFLOW:
2546 		dwc3_trace(trace_dwc3_gadget, "Overflow");
2547 		break;
2548 	default:
2549 		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2550 	}
2551 }
2552 
2553 static void dwc3_process_event_entry(struct dwc3 *dwc,
2554 		const union dwc3_event *event)
2555 {
2556 	trace_dwc3_event(event->raw);
2557 
2558 	/* Endpoint IRQ, handle it and return early */
2559 	if (event->type.is_devspec == 0) {
2560 		/* depevt */
2561 		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2562 	}
2563 
2564 	switch (event->type.type) {
2565 	case DWC3_EVENT_TYPE_DEV:
2566 		dwc3_gadget_interrupt(dwc, &event->devt);
2567 		break;
2568 	/* REVISIT what to do with Carkit and I2C events ? */
2569 	default:
2570 		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2571 	}
2572 }
2573 
2574 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2575 {
2576 	struct dwc3_event_buffer *evt;
2577 	irqreturn_t ret = IRQ_NONE;
2578 	int left;
2579 	u32 reg;
2580 
2581 	evt = dwc->ev_buffs[buf];
2582 	left = evt->count;
2583 
2584 	if (!(evt->flags & DWC3_EVENT_PENDING))
2585 		return IRQ_NONE;
2586 
2587 	while (left > 0) {
2588 		union dwc3_event event;
2589 
2590 		event.raw = *(u32 *) (evt->buf + evt->lpos);
2591 
2592 		dwc3_process_event_entry(dwc, &event);
2593 
2594 		/*
2595 		 * FIXME: we wrap around correctly to the next entry as
2596 		 * almost all entries are 4 bytes in size. There is one
2597 		 * 12-byte entry, which is a regular entry followed by
2598 		 * 8 bytes of data. It is not yet clear how such an entry
2599 		 * is laid out when it lands next to the buffer boundary,
2600 		 * so that will have to be worked out once we try to
2601 		 * handle it.
2602 		 */
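		/*
		 * Worked example (editor's addition, assuming a 4096 byte
		 * event buffer): with lpos at 4092, the update below yields
		 * (4092 + 4) % 4096 == 0, i.e. the ring wraps to the start.
		 */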
2603 		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2604 		left -= 4;
2605 
2606 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2607 	}
2608 
2609 	evt->count = 0;
2610 	evt->flags &= ~DWC3_EVENT_PENDING;
2611 	ret = IRQ_HANDLED;
2612 
2613 	/* Unmask interrupt */
2614 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2615 	reg &= ~DWC3_GEVNTSIZ_INTMASK;
2616 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2617 
2618 	return ret;
2619 }
2620 
2621 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2622 {
2623 	struct dwc3 *dwc = _dwc;
2624 	unsigned long flags;
2625 	irqreturn_t ret = IRQ_NONE;
2626 	int i;
2627 
2628 	spin_lock_irqsave(&dwc->lock, flags);
2629 
2630 	for (i = 0; i < dwc->num_event_buffers; i++)
2631 		ret |= dwc3_process_event_buf(dwc, i);
2632 
2633 	spin_unlock_irqrestore(&dwc->lock, flags);
2634 
2635 	return ret;
2636 }
2637 
2638 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2639 {
2640 	struct dwc3_event_buffer *evt;
2641 	u32 count;
2642 	u32 reg;
2643 
2644 	evt = dwc->ev_buffs[buf];
2645 
2646 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2647 	count &= DWC3_GEVNTCOUNT_MASK;
2648 	if (!count)
2649 		return IRQ_NONE;
2650 
2651 	evt->count = count;
2652 	evt->flags |= DWC3_EVENT_PENDING;
2653 
2654 	/* Mask interrupt */
2655 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2656 	reg |= DWC3_GEVNTSIZ_INTMASK;
2657 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2658 
2659 	return IRQ_WAKE_THREAD;
2660 }
2661 
2662 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2663 {
2664 	struct dwc3			*dwc = _dwc;
2665 	int				i;
2666 	irqreturn_t			ret = IRQ_NONE;
2667 
2668 	for (i = 0; i < dwc->num_event_buffers; i++) {
2669 		irqreturn_t status;
2670 
2671 		status = dwc3_check_event_buf(dwc, i);
2672 		if (status == IRQ_WAKE_THREAD)
2673 			ret = status;
2674 	}
2675 
2676 	return ret;
2677 }
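/*
 * Editor's note on the interrupt path above: dwc3_interrupt() runs in hard
 * IRQ context; dwc3_check_event_buf() latches GEVNTCOUNT into the event
 * buffer, masks the buffer's interrupt via GEVNTSIZ and returns
 * IRQ_WAKE_THREAD. dwc3_thread_interrupt() then drains the pending events
 * under dwc->lock, acknowledging each one through GEVNTCOUNT, and unmasks
 * the interrupt again in dwc3_process_event_buf().
 */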
2678 
2679 /**
2680  * dwc3_gadget_init - Initializes gadget related registers
2681  * @dwc: pointer to our controller context structure
2682  *
2683  * Returns 0 on success otherwise negative errno.
2684  */
2685 int dwc3_gadget_init(struct dwc3 *dwc)
2686 {
2687 	int					ret;
2688 
2689 	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2690 			&dwc->ctrl_req_addr, GFP_KERNEL);
2691 	if (!dwc->ctrl_req) {
2692 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2693 		ret = -ENOMEM;
2694 		goto err0;
2695 	}
2696 
2697 	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2698 			&dwc->ep0_trb_addr, GFP_KERNEL);
2699 	if (!dwc->ep0_trb) {
2700 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2701 		ret = -ENOMEM;
2702 		goto err1;
2703 	}
2704 
2705 	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2706 	if (!dwc->setup_buf) {
2707 		ret = -ENOMEM;
2708 		goto err2;
2709 	}
2710 
2711 	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2712 			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2713 			GFP_KERNEL);
2714 	if (!dwc->ep0_bounce) {
2715 		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2716 		ret = -ENOMEM;
2717 		goto err3;
2718 	}
2719 
2720 	dwc->gadget.ops			= &dwc3_gadget_ops;
2721 	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2722 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2723 	dwc->gadget.sg_supported	= true;
2724 	dwc->gadget.name		= "dwc3-gadget";
2725 
2726 	/*
2727 	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2728 	 * on ep out.
2729 	 */
2730 	dwc->gadget.quirk_ep_out_aligned_size = true;
2731 
2732 	/*
2733 	 * REVISIT: Here we should clear all pending IRQs to be
2734 	 * sure we're starting from a well known location.
2735 	 */
2736 
2737 	ret = dwc3_gadget_init_endpoints(dwc);
2738 	if (ret)
2739 		goto err4;
2740 
2741 	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2742 	if (ret) {
2743 		dev_err(dwc->dev, "failed to register udc\n");
2744 		goto err4;
2745 	}
2746 
2747 	return 0;
2748 
2749 err4:
2750 	dwc3_gadget_free_endpoints(dwc);
2751 	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2752 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
2753 
2754 err3:
2755 	kfree(dwc->setup_buf);
2756 
2757 err2:
2758 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2759 			dwc->ep0_trb, dwc->ep0_trb_addr);
2760 
2761 err1:
2762 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2763 			dwc->ctrl_req, dwc->ctrl_req_addr);
2764 
2765 err0:
2766 	return ret;
2767 }
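/*
 * Editor's sketch (not part of the driver): the err0..err4 labels above use
 * the usual kernel "goto ladder" unwind pattern, where each failure jumps to
 * the label that releases everything allocated so far, in reverse order.
 * A minimal, self-contained illustration of the same idiom; example_init()
 * is a made-up name:
 */
#if 0	/* illustrative only */
static int example_init(void)
{
	void *a, *b;
	int ret;

	a = kzalloc(64, GFP_KERNEL);
	if (!a) {
		ret = -ENOMEM;
		goto err0;
	}

	b = kzalloc(64, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err1;
	}

	/* on the normal path the buffers are kept for later use */
	return 0;

err1:
	kfree(a);
err0:
	return ret;
}
#endif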
2768 
2769 /* -------------------------------------------------------------------------- */
2770 
2771 void dwc3_gadget_exit(struct dwc3 *dwc)
2772 {
2773 	usb_del_gadget_udc(&dwc->gadget);
2774 
2775 	dwc3_gadget_free_endpoints(dwc);
2776 
2777 	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2778 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
2779 
2780 	kfree(dwc->setup_buf);
2781 
2782 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2783 			dwc->ep0_trb, dwc->ep0_trb_addr);
2784 
2785 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2786 			dwc->ctrl_req, dwc->ctrl_req_addr);
2787 }
2788 
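/*
 * Editor's note: dwc3_gadget_suspend() below saves DCFG and disables both
 * ep0 physical endpoints; dwc3_gadget_resume() re-enables them, restarts
 * SETUP reception, restores DCFG and, when the pullups are connected,
 * re-enables interrupts and restarts the controller.
 */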
2789 int dwc3_gadget_suspend(struct dwc3 *dwc)
2790 {
2791 	if (dwc->pullups_connected) {
2792 		dwc3_gadget_disable_irq(dwc);
2793 		dwc3_gadget_run_stop(dwc, true, true);
2794 	}
2795 
2796 	__dwc3_gadget_ep_disable(dwc->eps[0]);
2797 	__dwc3_gadget_ep_disable(dwc->eps[1]);
2798 
2799 	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2800 
2801 	return 0;
2802 }
2803 
2804 int dwc3_gadget_resume(struct dwc3 *dwc)
2805 {
2806 	struct dwc3_ep		*dep;
2807 	int			ret;
2808 
2809 	/* Start with SuperSpeed Default */
2810 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2811 
2812 	dep = dwc->eps[0];
2813 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2814 			false);
2815 	if (ret)
2816 		goto err0;
2817 
2818 	dep = dwc->eps[1];
2819 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2820 			false);
2821 	if (ret)
2822 		goto err1;
2823 
2824 	/* begin to receive SETUP packets */
2825 	dwc->ep0state = EP0_SETUP_PHASE;
2826 	dwc3_ep0_out_start(dwc);
2827 
2828 	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2829 
2830 	if (dwc->pullups_connected) {
2831 		dwc3_gadget_enable_irq(dwc);
2832 		dwc3_gadget_run_stop(dwc, true, false);
2833 	}
2834 
2835 	return 0;
2836 
2837 err1:
2838 	__dwc3_gadget_ep_disable(dwc->eps[0]);
2839 
2840 err0:
2841 	return ret;
2842 }
2843