xref: /openbmc/linux/drivers/usb/dwc3/gadget.c (revision 9cdb81c7)
1 /**
2  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3  *
4  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5  *
6  * Authors: Felipe Balbi <balbi@ti.com>,
7  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The names of the above-listed copyright holders may not be used
19  *    to endorse or promote products derived from this software without
20  *    specific prior written permission.
21  *
22  * ALTERNATIVELY, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") version 2, as published by the Free
24  * Software Foundation.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/delay.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/platform_device.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/interrupt.h>
46 #include <linux/io.h>
47 #include <linux/list.h>
48 #include <linux/dma-mapping.h>
49 
50 #include <linux/usb/ch9.h>
51 #include <linux/usb/gadget.h>
52 
53 #include "core.h"
54 #include "gadget.h"
55 #include "io.h"
56 
57 /**
58  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
59  * @dwc: pointer to our context structure
60  * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
61  *
62  * Caller should take care of locking. This function will
63  * return 0 on success or -EINVAL if an invalid Test Selector
64  * is passed.
65  */
66 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
67 {
68 	u32		reg;
69 
70 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
71 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
72 
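	/*
	 * The test selector lives in the DCTL TSTCTRL field, which starts at
	 * bit 1 (see DWC3_DCTL_TSTCTRL_MASK cleared above); that is why the
	 * selector value is shifted left by one before being OR'd in below.
	 */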
73 	switch (mode) {
74 	case TEST_J:
75 	case TEST_K:
76 	case TEST_SE0_NAK:
77 	case TEST_PACKET:
78 	case TEST_FORCE_EN:
79 		reg |= mode << 1;
80 		break;
81 	default:
82 		return -EINVAL;
83 	}
84 
85 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
86 
87 	return 0;
88 }
89 
90 /**
91  * dwc3_gadget_set_link_state - Sets USB Link to a particular State
92  * @dwc: pointer to our context structure
93  * @state: the state to put link into
94  *
95  * Caller should take care of locking. This function will
96  * return 0 on success or -ETIMEDOUT.
97  */
98 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
99 {
100 	int		retries = 10000;
101 	u32		reg;
102 
103 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
104 	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
105 
106 	/* set requested state */
107 	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
108 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
109 
110 	/* wait for a change in DSTS */
111 	while (--retries) {
112 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
113 
114 		if (DWC3_DSTS_USBLNKST(reg) == state)
115 			return 0;
116 
117 		udelay(5);
118 	}
119 
120 	dev_vdbg(dwc->dev, "link state change request timed out\n");
121 
122 	return -ETIMEDOUT;
123 }
124 
125 /**
126  * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
127  * @dwc: pointer to our context structure
128  *
129  * This function will do a best-effort FIFO allocation in order
130  * to improve FIFO usage and throughput, while still allowing
131  * us to enable as many endpoints as possible.
132  *
133  * Keep in mind that this operation will be highly dependent
134  * on the configured size for RAM1 - which contains TxFifo -,
135  * the number of endpoints enabled in the coreConsultant tool, and
136  * the width of the Master Bus.
137  *
138  * In the ideal world, we would always be able to satisfy the
139  * following equation:
140  *
141  * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
142  * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
143  *
144  * Unfortunately, due to many variables that's not always the case.
145  */
146 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
147 {
148 	int		last_fifo_depth = 0;
149 	int		ram1_depth;
150 	int		fifo_size;
151 	int		mdwidth;
152 	int		num;
153 
154 	if (!dwc->needs_fifo_resize)
155 		return 0;
156 
157 	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
158 	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
159 
160 	/* MDWIDTH is represented in bits, we need it in bytes */
161 	mdwidth >>= 3;
162 
163 	/*
164 	 * FIXME For now we will only allocate 1 wMaxPacketSize space
165 	 * for each enabled endpoint, later patches will come to
166 	 * improve this algorithm so that we better use the internal
167 	 * FIFO space
168 	 */
169 	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
170 		struct dwc3_ep	*dep = dwc->eps[num];
171 		int		fifo_number = dep->number >> 1;
172 		int		mult = 1;
173 		int		tmp;
174 
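		/*
		 * TX FIFOs only serve IN endpoints; even physical endpoint
		 * numbers are OUT, so those are skipped here.
		 */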
175 		if (!(dep->number & 1))
176 			continue;
177 
178 		if (!(dep->flags & DWC3_EP_ENABLED))
179 			continue;
180 
181 		if (usb_endpoint_xfer_bulk(dep->desc)
182 				|| usb_endpoint_xfer_isoc(dep->desc))
183 			mult = 3;
184 
185 		/*
186 		 * REVISIT: the following assumes we will always have enough
187 		 * space available on the FIFO RAM for all possible use cases.
188 		 * Make sure that's true somehow and change FIFO allocation
189 		 * accordingly.
190 		 *
191 		 * If we have Bulk or Isochronous endpoints, we want
192 		 * them to be able to be very, very fast. So we're giving
193 		 * those endpoints a fifo_size which is enough for 3 full
194 		 * packets
195 		 */
196 		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
197 		tmp += mdwidth;
198 
199 		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
200 
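		/*
		 * GTXFIFOSIZn takes the FIFO depth in its lower half and the
		 * FIFO start address in its upper half; last_fifo_depth tracks
		 * where the previous FIFO ended so the allocations end up
		 * packed back to back in RAM1.
		 */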
201 		fifo_size |= (last_fifo_depth << 16);
202 
203 		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
204 				dep->name, last_fifo_depth, fifo_size & 0xffff);
205 
206 		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
207 				fifo_size);
208 
209 		last_fifo_depth += (fifo_size & 0xffff);
210 	}
211 
212 	return 0;
213 }
214 
215 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
216 		int status)
217 {
218 	struct dwc3			*dwc = dep->dwc;
219 
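	/*
	 * If the request made it onto the hardware ring, advance busy_slot
	 * past the TRB(s) it consumed so the ring accounting in
	 * dwc3_prepare_trbs() stays in sync.
	 */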
220 	if (req->queued) {
221 		if (req->request.num_mapped_sgs)
222 			dep->busy_slot += req->request.num_mapped_sgs;
223 		else
224 			dep->busy_slot++;
225 
226 		/*
227 		 * Skip LINK TRB. We can't use req->trb and check for
228 		 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we just
229 		 * completed (not the LINK TRB).
230 		 */
231 		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
232 				usb_endpoint_xfer_isoc(dep->desc))
233 			dep->busy_slot++;
234 	}
235 	list_del(&req->list);
236 	req->trb = NULL;
237 
238 	if (req->request.status == -EINPROGRESS)
239 		req->request.status = status;
240 
241 	usb_gadget_unmap_request(&dwc->gadget, &req->request,
242 			req->direction);
243 
244 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
245 			req, dep->name, req->request.actual,
246 			req->request.length, status);
247 
248 	spin_unlock(&dwc->lock);
249 	req->request.complete(&dep->endpoint, &req->request);
250 	spin_lock(&dwc->lock);
251 }
252 
253 static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
254 {
255 	switch (cmd) {
256 	case DWC3_DEPCMD_DEPSTARTCFG:
257 		return "Start New Configuration";
258 	case DWC3_DEPCMD_ENDTRANSFER:
259 		return "End Transfer";
260 	case DWC3_DEPCMD_UPDATETRANSFER:
261 		return "Update Transfer";
262 	case DWC3_DEPCMD_STARTTRANSFER:
263 		return "Start Transfer";
264 	case DWC3_DEPCMD_CLEARSTALL:
265 		return "Clear Stall";
266 	case DWC3_DEPCMD_SETSTALL:
267 		return "Set Stall";
268 	case DWC3_DEPCMD_GETSEQNUMBER:
269 		return "Get Data Sequence Number";
270 	case DWC3_DEPCMD_SETTRANSFRESOURCE:
271 		return "Set Endpoint Transfer Resource";
272 	case DWC3_DEPCMD_SETEPCONFIG:
273 		return "Set Endpoint Configuration";
274 	default:
275 		return "UNKNOWN command";
276 	}
277 }
278 
279 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
280 		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
281 {
282 	struct dwc3_ep		*dep = dwc->eps[ep];
283 	u32			timeout = 500;
284 	u32			reg;
285 
286 	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
287 			dep->name,
288 			dwc3_gadget_ep_cmd_string(cmd), params->param0,
289 			params->param1, params->param2);
290 
291 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
292 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
293 	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
294 
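	/*
	 * Writing CMDACT together with the command kicks it off; the
	 * controller clears CMDACT once the command has completed, which is
	 * what the polling loop below waits for.
	 */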
295 	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
296 	do {
297 		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
298 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
299 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
300 					DWC3_DEPCMD_STATUS(reg));
301 			return 0;
302 		}
303 
304 		/*
305 		 * We can't sleep here, because it is also called from
306 		 * interrupt context.
307 		 */
308 		timeout--;
309 		if (!timeout)
310 			return -ETIMEDOUT;
311 
312 		udelay(1);
313 	} while (1);
314 }
315 
316 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
317 		struct dwc3_trb *trb)
318 {
319 	u32		offset = (char *) trb - (char *) dep->trb_pool;
320 
321 	return dep->trb_pool_dma + offset;
322 }
323 
324 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
325 {
326 	struct dwc3		*dwc = dep->dwc;
327 
328 	if (dep->trb_pool)
329 		return 0;
330 
331 	if (dep->number == 0 || dep->number == 1)
332 		return 0;
333 
334 	dep->trb_pool = dma_alloc_coherent(dwc->dev,
335 			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
336 			&dep->trb_pool_dma, GFP_KERNEL);
337 	if (!dep->trb_pool) {
338 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
339 				dep->name);
340 		return -ENOMEM;
341 	}
342 
343 	return 0;
344 }
345 
346 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
347 {
348 	struct dwc3		*dwc = dep->dwc;
349 
350 	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
351 			dep->trb_pool, dep->trb_pool_dma);
352 
353 	dep->trb_pool = NULL;
354 	dep->trb_pool_dma = 0;
355 }
356 
357 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
358 {
359 	struct dwc3_gadget_ep_cmd_params params;
360 	u32			cmd;
361 
362 	memset(&params, 0x00, sizeof(params));
363 
364 	if (dep->number != 1) {
365 		cmd = DWC3_DEPCMD_DEPSTARTCFG;
366 		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
367 		if (dep->number > 1) {
368 			if (dwc->start_config_issued)
369 				return 0;
370 			dwc->start_config_issued = true;
371 			cmd |= DWC3_DEPCMD_PARAM(2);
372 		}
373 
374 		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
375 	}
376 
377 	return 0;
378 }
379 
380 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
381 		const struct usb_endpoint_descriptor *desc,
382 		const struct usb_ss_ep_comp_descriptor *comp_desc)
383 {
384 	struct dwc3_gadget_ep_cmd_params params;
385 
386 	memset(&params, 0x00, sizeof(params));
387 
388 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
389 		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
390 		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);
391 
392 	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
393 		| DWC3_DEPCFG_XFER_NOT_READY_EN;
394 
395 	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
396 		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
397 			| DWC3_DEPCFG_STREAM_EVENT_EN;
398 		dep->stream_capable = true;
399 	}
400 
401 	if (usb_endpoint_xfer_isoc(desc))
402 		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
403 
404 	/*
405 	 * We are doing 1:1 mapping for endpoints, meaning
406 	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
407 	 * so on. We consider the direction bit as part of the physical
408 	 * endpoint number. So USB endpoint 0x81 is physical endpoint 3.
409 	 */
410 	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
411 
412 	/*
413 	 * We must use the lower 16 TX FIFOs even though
414 	 * HW might have more
415 	 */
416 	if (dep->direction)
417 		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
418 
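	/*
	 * bInterval is encoded as a power of two: the hardware takes
	 * (bInterval - 1) while dep->interval keeps the decoded value of
	 * 2^(bInterval - 1) (micro)frames.
	 */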
419 	if (desc->bInterval) {
420 		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
421 		dep->interval = 1 << (desc->bInterval - 1);
422 	}
423 
424 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
425 			DWC3_DEPCMD_SETEPCONFIG, &params);
426 }
427 
428 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
429 {
430 	struct dwc3_gadget_ep_cmd_params params;
431 
432 	memset(&params, 0x00, sizeof(params));
433 
434 	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
435 
436 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
437 			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
438 }
439 
440 /**
441  * __dwc3_gadget_ep_enable - Initializes a HW endpoint
442  * @dep: endpoint to be initialized
443  * @desc: USB Endpoint Descriptor
444  *
445  * Caller should take care of locking
446  */
447 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
448 		const struct usb_endpoint_descriptor *desc,
449 		const struct usb_ss_ep_comp_descriptor *comp_desc)
450 {
451 	struct dwc3		*dwc = dep->dwc;
452 	u32			reg;
453 	int			ret = -ENOMEM;
454 
455 	if (!(dep->flags & DWC3_EP_ENABLED)) {
456 		ret = dwc3_gadget_start_config(dwc, dep);
457 		if (ret)
458 			return ret;
459 	}
460 
461 	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
462 	if (ret)
463 		return ret;
464 
465 	if (!(dep->flags & DWC3_EP_ENABLED)) {
466 		struct dwc3_trb	*trb_st_hw;
467 		struct dwc3_trb	*trb_link;
468 
469 		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
470 		if (ret)
471 			return ret;
472 
473 		dep->desc = desc;
474 		dep->comp_desc = comp_desc;
475 		dep->type = usb_endpoint_type(desc);
476 		dep->flags |= DWC3_EP_ENABLED;
477 
478 		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
479 		reg |= DWC3_DALEPENA_EP(dep->number);
480 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
481 
482 		if (!usb_endpoint_xfer_isoc(desc))
483 			return 0;
484 
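		/*
		 * For isochronous endpoints the last TRB in the pool is a
		 * link TRB pointing back to the first one, turning the pool
		 * into a ring that can be refilled while transfers keep
		 * running.
		 */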
485 		/* Link TRB for ISOC. The HWO bit is never reset */
486 		trb_st_hw = &dep->trb_pool[0];
487 		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
488 
489 		/* zero the link TRB itself (not the pointer) before filling it in */
490 		memset(trb_link, 0, sizeof(*trb_link));
491 
492 		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
493 		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
494 		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
495 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
496 	}
497 
498 	return 0;
499 }
500 
501 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
502 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
503 {
504 	struct dwc3_request		*req;
505 
506 	if (!list_empty(&dep->req_queued))
507 		dwc3_stop_active_transfer(dwc, dep->number);
508 
509 	while (!list_empty(&dep->request_list)) {
510 		req = next_request(&dep->request_list);
511 
512 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
513 	}
514 }
515 
516 /**
517  * __dwc3_gadget_ep_disable - Disables a HW endpoint
518  * @dep: the endpoint to disable
519  *
520  * This function also removes requests which are currently processed by the
521  * hardware and those which are not yet scheduled.
522  * Caller should take care of locking.
523  */
524 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
525 {
526 	struct dwc3		*dwc = dep->dwc;
527 	u32			reg;
528 
529 	dwc3_remove_requests(dwc, dep);
530 
531 	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
532 	reg &= ~DWC3_DALEPENA_EP(dep->number);
533 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
534 
535 	dep->stream_capable = false;
536 	dep->desc = NULL;
537 	dep->endpoint.desc = NULL;
538 	dep->comp_desc = NULL;
539 	dep->type = 0;
540 	dep->flags = 0;
541 
542 	return 0;
543 }
544 
545 /* -------------------------------------------------------------------------- */
546 
547 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
548 		const struct usb_endpoint_descriptor *desc)
549 {
550 	return -EINVAL;
551 }
552 
553 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
554 {
555 	return -EINVAL;
556 }
557 
558 /* -------------------------------------------------------------------------- */
559 
560 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
561 		const struct usb_endpoint_descriptor *desc)
562 {
563 	struct dwc3_ep			*dep;
564 	struct dwc3			*dwc;
565 	unsigned long			flags;
566 	int				ret;
567 
568 	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
569 		pr_debug("dwc3: invalid parameters\n");
570 		return -EINVAL;
571 	}
572 
573 	if (!desc->wMaxPacketSize) {
574 		pr_debug("dwc3: missing wMaxPacketSize\n");
575 		return -EINVAL;
576 	}
577 
578 	dep = to_dwc3_ep(ep);
579 	dwc = dep->dwc;
580 
581 	switch (usb_endpoint_type(desc)) {
582 	case USB_ENDPOINT_XFER_CONTROL:
583 		strlcat(dep->name, "-control", sizeof(dep->name));
584 		break;
585 	case USB_ENDPOINT_XFER_ISOC:
586 		strlcat(dep->name, "-isoc", sizeof(dep->name));
587 		break;
588 	case USB_ENDPOINT_XFER_BULK:
589 		strlcat(dep->name, "-bulk", sizeof(dep->name));
590 		break;
591 	case USB_ENDPOINT_XFER_INT:
592 		strlcat(dep->name, "-int", sizeof(dep->name));
593 		break;
594 	default:
595 		dev_err(dwc->dev, "invalid endpoint transfer type\n");
596 	}
597 
598 	if (dep->flags & DWC3_EP_ENABLED) {
599 		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
600 				dep->name);
601 		return 0;
602 	}
603 
604 	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
605 
606 	spin_lock_irqsave(&dwc->lock, flags);
607 	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
608 	spin_unlock_irqrestore(&dwc->lock, flags);
609 
610 	return ret;
611 }
612 
613 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
614 {
615 	struct dwc3_ep			*dep;
616 	struct dwc3			*dwc;
617 	unsigned long			flags;
618 	int				ret;
619 
620 	if (!ep) {
621 		pr_debug("dwc3: invalid parameters\n");
622 		return -EINVAL;
623 	}
624 
625 	dep = to_dwc3_ep(ep);
626 	dwc = dep->dwc;
627 
628 	if (!(dep->flags & DWC3_EP_ENABLED)) {
629 		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
630 				dep->name);
631 		return 0;
632 	}
633 
634 	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
635 			dep->number >> 1,
636 			(dep->number & 1) ? "in" : "out");
637 
638 	spin_lock_irqsave(&dwc->lock, flags);
639 	ret = __dwc3_gadget_ep_disable(dep);
640 	spin_unlock_irqrestore(&dwc->lock, flags);
641 
642 	return ret;
643 }
644 
645 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
646 	gfp_t gfp_flags)
647 {
648 	struct dwc3_request		*req;
649 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
650 	struct dwc3			*dwc = dep->dwc;
651 
652 	req = kzalloc(sizeof(*req), gfp_flags);
653 	if (!req) {
654 		dev_err(dwc->dev, "not enough memory\n");
655 		return NULL;
656 	}
657 
658 	req->epnum	= dep->number;
659 	req->dep	= dep;
660 
661 	return &req->request;
662 }
663 
664 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
665 		struct usb_request *request)
666 {
667 	struct dwc3_request		*req = to_dwc3_request(request);
668 
669 	kfree(req);
670 }
671 
672 /**
673  * dwc3_prepare_one_trb - setup one TRB from one request
674  * @dep: endpoint for which this request is prepared
675  * @req: dwc3_request pointer
676  */
677 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
678 		struct dwc3_request *req, dma_addr_t dma,
679 		unsigned length, unsigned last, unsigned chain)
680 {
681 	struct dwc3		*dwc = dep->dwc;
682 	struct dwc3_trb		*trb;
683 
684 	unsigned int		cur_slot;
685 
686 	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
687 			dep->name, req, (unsigned long long) dma,
688 			length, last ? " last" : "",
689 			chain ? " chain" : "");
690 
691 	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
692 	cur_slot = dep->free_slot;
693 	dep->free_slot++;
694 
695 	/* Skip the LINK-TRB on ISOC */
696 	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
697 			usb_endpoint_xfer_isoc(dep->desc))
698 		return;
699 
700 	if (!req->trb) {
701 		dwc3_gadget_move_request_queued(req);
702 		req->trb = trb;
703 		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
704 	}
705 
706 	trb->size = DWC3_TRB_SIZE_LENGTH(length);
707 	trb->bpl = lower_32_bits(dma);
708 	trb->bph = upper_32_bits(dma);
709 
710 	switch (usb_endpoint_type(dep->desc)) {
711 	case USB_ENDPOINT_XFER_CONTROL:
712 		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
713 		break;
714 
715 	case USB_ENDPOINT_XFER_ISOC:
716 		trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
717 
718 		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
719 		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
720 			trb->ctrl |= DWC3_TRB_CTRL_IOC;
721 		break;
722 
723 	case USB_ENDPOINT_XFER_BULK:
724 	case USB_ENDPOINT_XFER_INT:
725 		trb->ctrl = DWC3_TRBCTL_NORMAL;
726 		break;
727 	default:
728 		/*
729 		 * This is only possible with faulty memory because we
730 		 * checked it already :)
731 		 */
732 		BUG();
733 	}
734 
735 	if (usb_endpoint_xfer_isoc(dep->desc)) {
736 		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
737 		trb->ctrl |= DWC3_TRB_CTRL_CSP;
738 	} else {
739 		if (chain)
740 			trb->ctrl |= DWC3_TRB_CTRL_CHN;
741 
742 		if (last)
743 			trb->ctrl |= DWC3_TRB_CTRL_LST;
744 	}
745 
746 	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
747 		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
748 
749 	trb->ctrl |= DWC3_TRB_CTRL_HWO;
750 }
751 
752 /*
753  * dwc3_prepare_trbs - setup TRBs from requests
754  * @dep: endpoint for which requests are being prepared
755  * @starting: true if the endpoint is idle and no requests are queued.
756  *
757  * The function goes through the requests list and sets up TRBs for the
758  * transfers. The function returns once there are no more TRBs available or
759  * it runs out of requests.
760  */
761 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
762 {
763 	struct dwc3_request	*req, *n;
764 	u32			trbs_left;
765 	u32			max;
766 	unsigned int		last_one = 0;
767 
768 	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
769 
770 	/* the first request must not be queued */
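	/*
	 * busy_slot and free_slot are free-running indices into the TRB
	 * pool; their difference, masked to the ring size, tells how many
	 * TRB slots may still be filled.
	 */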
771 	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
772 
773 	/* Can't wrap around on a non-isoc EP since there's no link TRB */
774 	if (!usb_endpoint_xfer_isoc(dep->desc)) {
775 		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
776 		if (trbs_left > max)
777 			trbs_left = max;
778 	}
779 
780 	/*
781 	 * If the busy and free slots are equal, then the ring is either full
782 	 * or empty. If we are starting to process requests then we are empty.
783 	 * Otherwise we are full and don't do anything.
784 	 */
785 	if (!trbs_left) {
786 		if (!starting)
787 			return;
788 		trbs_left = DWC3_TRB_NUM;
789 		/*
790 		 * In case we start from scratch, we queue the ISOC requests
791 		 * starting from slot 1. This is done because we use ring
792 		 * buffer and have no LST bit to stop us. Instead, we place
793 		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
794 		 * after the first request so we start at slot 1 and have
795 		 * 7 requests proceed before we hit the first IOC.
796 		 * Other transfer types don't use the ring buffer and are
797 		 * processed from the first TRB until the last one. Since we
798 		 * don't wrap around we have to start at the beginning.
799 		 */
800 		if (usb_endpoint_xfer_isoc(dep->desc)) {
801 			dep->busy_slot = 1;
802 			dep->free_slot = 1;
803 		} else {
804 			dep->busy_slot = 0;
805 			dep->free_slot = 0;
806 		}
807 	}
808 
809 	/* The last TRB is a link TRB, not used for xfer */
810 	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
811 		return;
812 
813 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
814 		unsigned	length;
815 		dma_addr_t	dma;
816 
817 		if (req->request.num_mapped_sgs > 0) {
818 			struct usb_request *request = &req->request;
819 			struct scatterlist *sg = request->sg;
820 			struct scatterlist *s;
821 			int		i;
822 
823 			for_each_sg(sg, s, request->num_mapped_sgs, i) {
824 				unsigned chain = true;
825 
826 				length = sg_dma_len(s);
827 				dma = sg_dma_address(s);
828 
829 				if (i == (request->num_mapped_sgs - 1) ||
830 						sg_is_last(s)) {
831 					last_one = true;
832 					chain = false;
833 				}
834 
835 				trbs_left--;
836 				if (!trbs_left)
837 					last_one = true;
838 
839 				if (last_one)
840 					chain = false;
841 
842 				dwc3_prepare_one_trb(dep, req, dma, length,
843 						last_one, chain);
844 
845 				if (last_one)
846 					break;
847 			}
848 		} else {
849 			dma = req->request.dma;
850 			length = req->request.length;
851 			trbs_left--;
852 
853 			if (!trbs_left)
854 				last_one = 1;
855 
856 			/* Is this the last request? */
857 			if (list_is_last(&req->list, &dep->request_list))
858 				last_one = 1;
859 
860 			dwc3_prepare_one_trb(dep, req, dma, length,
861 					last_one, false);
862 
863 			if (last_one)
864 				break;
865 		}
866 	}
867 }
868 
869 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
870 		int start_new)
871 {
872 	struct dwc3_gadget_ep_cmd_params params;
873 	struct dwc3_request		*req;
874 	struct dwc3			*dwc = dep->dwc;
875 	int				ret;
876 	u32				cmd;
877 
878 	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
879 		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
880 		return -EBUSY;
881 	}
882 	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
883 
884 	/*
885 	 * If we are getting here after a short-out-packet we don't enqueue any
886 	 * new requests as we try to set the IOC bit only on the last request.
887 	 */
888 	if (start_new) {
889 		if (list_empty(&dep->req_queued))
890 			dwc3_prepare_trbs(dep, start_new);
891 
892 		/* req points to the first request which will be sent */
893 		req = next_request(&dep->req_queued);
894 	} else {
895 		dwc3_prepare_trbs(dep, start_new);
896 
897 		/*
898 		 * req points to the first request where HWO changed from 0 to 1
899 		 */
900 		req = next_request(&dep->req_queued);
901 	}
902 	if (!req) {
903 		dep->flags |= DWC3_EP_PENDING_REQUEST;
904 		return 0;
905 	}
906 
907 	memset(&params, 0, sizeof(params));
908 	params.param0 = upper_32_bits(req->trb_dma);
909 	params.param1 = lower_32_bits(req->trb_dma);
910 
911 	if (start_new)
912 		cmd = DWC3_DEPCMD_STARTTRANSFER;
913 	else
914 		cmd = DWC3_DEPCMD_UPDATETRANSFER;
915 
916 	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
917 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
918 	if (ret < 0) {
919 		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
920 
921 		/*
922 		 * FIXME we need to iterate over the list of requests
923 		 * here and stop, unmap, free and del each of the linked
924 		 * requests instead of what we do now.
925 		 */
926 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
927 				req->direction);
928 		list_del(&req->list);
929 		return ret;
930 	}
931 
932 	dep->flags |= DWC3_EP_BUSY;
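	/*
	 * Remember the transfer resource index handed back for this Start
	 * Transfer; dwc3_stop_active_transfer() needs it later to issue the
	 * matching End Transfer command.
	 */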
933 	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
934 			dep->number);
935 
936 	WARN_ON_ONCE(!dep->res_trans_idx);
937 
938 	return 0;
939 }
940 
941 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
942 {
943 	struct dwc3		*dwc = dep->dwc;
944 	int			ret;
945 
946 	req->request.actual	= 0;
947 	req->request.status	= -EINPROGRESS;
948 	req->direction		= dep->direction;
949 	req->epnum		= dep->number;
950 
951 	/*
952 	 * We only add to our list of requests now and
953 	 * start consuming the list once we get XferNotReady
954 	 * IRQ.
955 	 *
956 	 * That way, we avoid doing anything that we don't need
957 	 * to do now and defer it until the point we receive a
958 	 * particular token from the Host side.
959 	 *
960 	 * This will also avoid Host cancelling URBs due to too
961 	 * many NAKs.
962 	 */
963 	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
964 			dep->direction);
965 	if (ret)
966 		return ret;
967 
968 	list_add_tail(&req->list, &dep->request_list);
969 
970 	/*
971 	 * There is one special case: XferNotReady with
972 	 * empty list of requests. We need to kick the
973 	 * transfer here in that situation, otherwise
974 	 * we will be NAKing forever.
975 	 *
976 	 * If we get XferNotReady before gadget driver
977 	 * has a chance to queue a request, we will ACK
978 	 * the IRQ but won't be able to receive the data
979 	 * until the next request is queued. The following
980 	 * code is handling exactly that.
981 	 */
982 	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
983 		int ret;
984 		int start_trans;
985 
986 		start_trans = 1;
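		/*
		 * An isochronous endpoint that is already busy has a
		 * transfer in flight, so request an Update Transfer
		 * (start_trans = 0) instead of a new Start Transfer.
		 */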
987 		if (usb_endpoint_xfer_isoc(dep->desc) &&
988 				(dep->flags & DWC3_EP_BUSY))
989 			start_trans = 0;
990 
991 		ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
992 		if (ret && ret != -EBUSY) {
993 			struct dwc3	*dwc = dep->dwc;
994 
995 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
996 					dep->name);
997 		}
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1004 	gfp_t gfp_flags)
1005 {
1006 	struct dwc3_request		*req = to_dwc3_request(request);
1007 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1008 	struct dwc3			*dwc = dep->dwc;
1009 
1010 	unsigned long			flags;
1011 
1012 	int				ret;
1013 
1014 	if (!dep->desc) {
1015 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1016 				request, ep->name);
1017 		return -ESHUTDOWN;
1018 	}
1019 
1020 	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
1021 			request, ep->name, request->length);
1022 
1023 	spin_lock_irqsave(&dwc->lock, flags);
1024 	ret = __dwc3_gadget_ep_queue(dep, req);
1025 	spin_unlock_irqrestore(&dwc->lock, flags);
1026 
1027 	return ret;
1028 }
1029 
1030 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1031 		struct usb_request *request)
1032 {
1033 	struct dwc3_request		*req = to_dwc3_request(request);
1034 	struct dwc3_request		*r = NULL;
1035 
1036 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1037 	struct dwc3			*dwc = dep->dwc;
1038 
1039 	unsigned long			flags;
1040 	int				ret = 0;
1041 
1042 	spin_lock_irqsave(&dwc->lock, flags);
1043 
1044 	list_for_each_entry(r, &dep->request_list, list) {
1045 		if (r == req)
1046 			break;
1047 	}
1048 
1049 	if (r != req) {
1050 		list_for_each_entry(r, &dep->req_queued, list) {
1051 			if (r == req)
1052 				break;
1053 		}
1054 		if (r == req) {
1055 			/* wait until it is processed */
1056 			dwc3_stop_active_transfer(dwc, dep->number);
1057 			goto out0;
1058 		}
1059 		dev_err(dwc->dev, "request %p was not queued to %s\n",
1060 				request, ep->name);
1061 		ret = -EINVAL;
1062 		goto out0;
1063 	}
1064 
1065 	/* giveback the request */
1066 	dwc3_gadget_giveback(dep, req, -ECONNRESET);
1067 
1068 out0:
1069 	spin_unlock_irqrestore(&dwc->lock, flags);
1070 
1071 	return ret;
1072 }
1073 
1074 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1075 {
1076 	struct dwc3_gadget_ep_cmd_params	params;
1077 	struct dwc3				*dwc = dep->dwc;
1078 	int					ret;
1079 
1080 	memset(&params, 0x00, sizeof(params));
1081 
1082 	if (value) {
1083 		if (dep->number == 0 || dep->number == 1) {
1084 			/*
1085 			 * Whenever EP0 is stalled, we will restart
1086 			 * the state machine, thus moving back to
1087 			 * Setup Phase
1088 			 */
1089 			dwc->ep0state = EP0_SETUP_PHASE;
1090 		}
1091 
1092 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1093 			DWC3_DEPCMD_SETSTALL, &params);
1094 		if (ret)
1095 			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1096 					value ? "set" : "clear",
1097 					dep->name);
1098 		else
1099 			dep->flags |= DWC3_EP_STALL;
1100 	} else {
1101 		if (dep->flags & DWC3_EP_WEDGE)
1102 			return 0;
1103 
1104 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1105 			DWC3_DEPCMD_CLEARSTALL, &params);
1106 		if (ret)
1107 			dev_err(dwc->dev, "failed to %s STALL on %s\n",
1108 					value ? "set" : "clear",
1109 					dep->name);
1110 		else
1111 			dep->flags &= ~DWC3_EP_STALL;
1112 	}
1113 
1114 	return ret;
1115 }
1116 
1117 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1118 {
1119 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1120 	struct dwc3			*dwc = dep->dwc;
1121 
1122 	unsigned long			flags;
1123 
1124 	int				ret;
1125 
1126 	spin_lock_irqsave(&dwc->lock, flags);
1127 
1128 	if (usb_endpoint_xfer_isoc(dep->desc)) {
1129 		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1130 		ret = -EINVAL;
1131 		goto out;
1132 	}
1133 
1134 	ret = __dwc3_gadget_ep_set_halt(dep, value);
1135 out:
1136 	spin_unlock_irqrestore(&dwc->lock, flags);
1137 
1138 	return ret;
1139 }
1140 
1141 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1142 {
1143 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1144 	struct dwc3			*dwc = dep->dwc;
1145 	unsigned long			flags;
1146 
1147 	spin_lock_irqsave(&dwc->lock, flags);
1148 	dep->flags |= DWC3_EP_WEDGE;
1149 	spin_unlock_irqrestore(&dwc->lock, flags);
1150 
1151 	return dwc3_gadget_ep_set_halt(ep, 1);
1152 }
1153 
1154 /* -------------------------------------------------------------------------- */
1155 
1156 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1157 	.bLength	= USB_DT_ENDPOINT_SIZE,
1158 	.bDescriptorType = USB_DT_ENDPOINT,
1159 	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
1160 };
1161 
1162 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1163 	.enable		= dwc3_gadget_ep0_enable,
1164 	.disable	= dwc3_gadget_ep0_disable,
1165 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1166 	.free_request	= dwc3_gadget_ep_free_request,
1167 	.queue		= dwc3_gadget_ep0_queue,
1168 	.dequeue	= dwc3_gadget_ep_dequeue,
1169 	.set_halt	= dwc3_gadget_ep_set_halt,
1170 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1171 };
1172 
1173 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1174 	.enable		= dwc3_gadget_ep_enable,
1175 	.disable	= dwc3_gadget_ep_disable,
1176 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1177 	.free_request	= dwc3_gadget_ep_free_request,
1178 	.queue		= dwc3_gadget_ep_queue,
1179 	.dequeue	= dwc3_gadget_ep_dequeue,
1180 	.set_halt	= dwc3_gadget_ep_set_halt,
1181 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1182 };
1183 
1184 /* -------------------------------------------------------------------------- */
1185 
1186 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1187 {
1188 	struct dwc3		*dwc = gadget_to_dwc(g);
1189 	u32			reg;
1190 
1191 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1192 	return DWC3_DSTS_SOFFN(reg);
1193 }
1194 
1195 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1196 {
1197 	struct dwc3		*dwc = gadget_to_dwc(g);
1198 
1199 	unsigned long		timeout;
1200 	unsigned long		flags;
1201 
1202 	u32			reg;
1203 
1204 	int			ret = 0;
1205 
1206 	u8			link_state;
1207 	u8			speed;
1208 
1209 	spin_lock_irqsave(&dwc->lock, flags);
1210 
1211 	/*
1212 	 * According to the Databook, a Remote Wakeup request should
1213 	 * be issued only when the device is in early suspend state.
1214 	 *
1215 	 * We can check that via USB Link State bits in DSTS register.
1216 	 */
1217 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1218 
1219 	speed = reg & DWC3_DSTS_CONNECTSPD;
1220 	if (speed == DWC3_DSTS_SUPERSPEED) {
1221 		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1222 		ret = -EINVAL;
1223 		goto out;
1224 	}
1225 
1226 	link_state = DWC3_DSTS_USBLNKST(reg);
1227 
1228 	switch (link_state) {
1229 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1230 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1231 		break;
1232 	default:
1233 		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1234 				link_state);
1235 		ret = -EINVAL;
1236 		goto out;
1237 	}
1238 
1239 	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1240 	if (ret < 0) {
1241 		dev_err(dwc->dev, "failed to put link in Recovery\n");
1242 		goto out;
1243 	}
1244 
1245 	/* write zeroes to Link Change Request */
1246 	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1247 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1248 
1249 	/* poll until Link State changes to ON */
1250 	timeout = jiffies + msecs_to_jiffies(100);
1251 
1252 	while (!time_after(jiffies, timeout)) {
1253 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1254 
1255 		/* in HS, means ON */
1256 		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1257 			break;
1258 	}
1259 
1260 	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1261 		dev_err(dwc->dev, "failed to send remote wakeup\n");
1262 		ret = -EINVAL;
1263 	}
1264 
1265 out:
1266 	spin_unlock_irqrestore(&dwc->lock, flags);
1267 
1268 	return ret;
1269 }
1270 
1271 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1272 		int is_selfpowered)
1273 {
1274 	struct dwc3		*dwc = gadget_to_dwc(g);
1275 	unsigned long		flags;
1276 
1277 	spin_lock_irqsave(&dwc->lock, flags);
1278 	dwc->is_selfpowered = !!is_selfpowered;
1279 	spin_unlock_irqrestore(&dwc->lock, flags);
1280 
1281 	return 0;
1282 }
1283 
1284 static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1285 {
1286 	u32			reg;
1287 	u32			timeout = 500;
1288 
1289 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1290 	if (is_on) {
1291 		reg &= ~DWC3_DCTL_TRGTULST_MASK;
1292 		reg |= (DWC3_DCTL_RUN_STOP
1293 				| DWC3_DCTL_TRGTULST_RX_DET);
1294 	} else {
1295 		reg &= ~DWC3_DCTL_RUN_STOP;
1296 	}
1297 
1298 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1299 
1300 	do {
1301 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1302 		if (is_on) {
1303 			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1304 				break;
1305 		} else {
1306 			if (reg & DWC3_DSTS_DEVCTRLHLT)
1307 				break;
1308 		}
1309 		timeout--;
1310 		if (!timeout)
1311 			break;
1312 		udelay(1);
1313 	} while (1);
1314 
1315 	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1316 			dwc->gadget_driver
1317 			? dwc->gadget_driver->function : "no-function",
1318 			is_on ? "connect" : "disconnect");
1319 }
1320 
1321 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1322 {
1323 	struct dwc3		*dwc = gadget_to_dwc(g);
1324 	unsigned long		flags;
1325 
1326 	is_on = !!is_on;
1327 
1328 	spin_lock_irqsave(&dwc->lock, flags);
1329 	dwc3_gadget_run_stop(dwc, is_on);
1330 	spin_unlock_irqrestore(&dwc->lock, flags);
1331 
1332 	return 0;
1333 }
1334 
1335 static int dwc3_gadget_start(struct usb_gadget *g,
1336 		struct usb_gadget_driver *driver)
1337 {
1338 	struct dwc3		*dwc = gadget_to_dwc(g);
1339 	struct dwc3_ep		*dep;
1340 	unsigned long		flags;
1341 	int			ret = 0;
1342 	u32			reg;
1343 
1344 	spin_lock_irqsave(&dwc->lock, flags);
1345 
1346 	if (dwc->gadget_driver) {
1347 		dev_err(dwc->dev, "%s is already bound to %s\n",
1348 				dwc->gadget.name,
1349 				dwc->gadget_driver->driver.name);
1350 		ret = -EBUSY;
1351 		goto err0;
1352 	}
1353 
1354 	dwc->gadget_driver	= driver;
1355 	dwc->gadget.dev.driver	= &driver->driver;
1356 
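	/*
	 * Program the maximum speed we are willing to run at into the
	 * device speed field of DCFG before anything gets connected.
	 */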
1357 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1358 	reg &= ~(DWC3_DCFG_SPEED_MASK);
1359 	reg |= dwc->maximum_speed;
1360 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1361 
1362 	dwc->start_config_issued = false;
1363 
1364 	/* Start with SuperSpeed Default */
1365 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1366 
1367 	dep = dwc->eps[0];
1368 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1369 	if (ret) {
1370 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1371 		goto err0;
1372 	}
1373 
1374 	dep = dwc->eps[1];
1375 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1376 	if (ret) {
1377 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1378 		goto err1;
1379 	}
1380 
1381 	/* begin to receive SETUP packets */
1382 	dwc->ep0state = EP0_SETUP_PHASE;
1383 	dwc3_ep0_out_start(dwc);
1384 
1385 	spin_unlock_irqrestore(&dwc->lock, flags);
1386 
1387 	return 0;
1388 
1389 err1:
1390 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1391 
1392 err0:
1393 	spin_unlock_irqrestore(&dwc->lock, flags);
1394 
1395 	return ret;
1396 }
1397 
1398 static int dwc3_gadget_stop(struct usb_gadget *g,
1399 		struct usb_gadget_driver *driver)
1400 {
1401 	struct dwc3		*dwc = gadget_to_dwc(g);
1402 	unsigned long		flags;
1403 
1404 	spin_lock_irqsave(&dwc->lock, flags);
1405 
1406 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1407 	__dwc3_gadget_ep_disable(dwc->eps[1]);
1408 
1409 	dwc->gadget_driver	= NULL;
1410 	dwc->gadget.dev.driver	= NULL;
1411 
1412 	spin_unlock_irqrestore(&dwc->lock, flags);
1413 
1414 	return 0;
1415 }
1416 static const struct usb_gadget_ops dwc3_gadget_ops = {
1417 	.get_frame		= dwc3_gadget_get_frame,
1418 	.wakeup			= dwc3_gadget_wakeup,
1419 	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1420 	.pullup			= dwc3_gadget_pullup,
1421 	.udc_start		= dwc3_gadget_start,
1422 	.udc_stop		= dwc3_gadget_stop,
1423 };
1424 
1425 /* -------------------------------------------------------------------------- */
1426 
1427 static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1428 {
1429 	struct dwc3_ep			*dep;
1430 	u8				epnum;
1431 
1432 	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1433 
1434 	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1435 		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1436 		if (!dep) {
1437 			dev_err(dwc->dev, "can't allocate endpoint %d\n",
1438 					epnum);
1439 			return -ENOMEM;
1440 		}
1441 
1442 		dep->dwc = dwc;
1443 		dep->number = epnum;
1444 		dwc->eps[epnum] = dep;
1445 
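		/*
		 * Physical endpoints come in OUT/IN pairs: even numbers are
		 * OUT, odd numbers are IN, and physical endpoint n maps to
		 * USB endpoint number n >> 1.
		 */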
1446 		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1447 				(epnum & 1) ? "in" : "out");
1448 		dep->endpoint.name = dep->name;
1449 		dep->direction = (epnum & 1);
1450 
1451 		if (epnum == 0 || epnum == 1) {
1452 			dep->endpoint.maxpacket = 512;
1453 			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1454 			if (!epnum)
1455 				dwc->gadget.ep0 = &dep->endpoint;
1456 		} else {
1457 			int		ret;
1458 
1459 			dep->endpoint.maxpacket = 1024;
1460 			dep->endpoint.max_streams = 15;
1461 			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1462 			list_add_tail(&dep->endpoint.ep_list,
1463 					&dwc->gadget.ep_list);
1464 
1465 			ret = dwc3_alloc_trb_pool(dep);
1466 			if (ret)
1467 				return ret;
1468 		}
1469 
1470 		INIT_LIST_HEAD(&dep->request_list);
1471 		INIT_LIST_HEAD(&dep->req_queued);
1472 	}
1473 
1474 	return 0;
1475 }
1476 
1477 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1478 {
1479 	struct dwc3_ep			*dep;
1480 	u8				epnum;
1481 
1482 	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1483 		dep = dwc->eps[epnum];
1484 		dwc3_free_trb_pool(dep);
1485 
1486 		if (epnum != 0 && epnum != 1)
1487 			list_del(&dep->endpoint.ep_list);
1488 
1489 		kfree(dep);
1490 	}
1491 }
1492 
1493 static void dwc3_gadget_release(struct device *dev)
1494 {
1495 	dev_dbg(dev, "%s\n", __func__);
1496 }
1497 
1498 /* -------------------------------------------------------------------------- */
1499 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1500 		const struct dwc3_event_depevt *event, int status)
1501 {
1502 	struct dwc3_request	*req;
1503 	struct dwc3_trb		*trb;
1504 	unsigned int		count;
1505 	unsigned int		s_pkt = 0;
1506 
1507 	do {
1508 		req = next_request(&dep->req_queued);
1509 		if (!req) {
1510 			WARN_ON_ONCE(1);
1511 			return 1;
1512 		}
1513 
1514 		trb = req->trb;
1515 
1516 		if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1517 			/*
1518 			 * We continue despite the error. There is not much we
1519 			 * can do. If we don't clean it up we loop forever. If
1520 			 * we skip the TRB then it gets overwritten after a
1521 			 * while since we use them in a ring buffer. A BUG()
1522 			 * would help. Let's hope that if this occurs, someone
1523 			 * fixes the root cause instead of looking away :)
1524 			 */
1525 			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1526 					dep->name, req->trb);
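		/*
		 * The TRB size field holds the number of bytes the controller
		 * did not transfer for this TRB, which is why the actual
		 * length below is computed as length - count.
		 */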
1527 		count = trb->size & DWC3_TRB_SIZE_MASK;
1528 
1529 		if (dep->direction) {
1530 			if (count) {
1531 				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1532 						dep->name);
1533 				status = -ECONNRESET;
1534 			}
1535 		} else {
1536 			if (count && (event->status & DEPEVT_STATUS_SHORT))
1537 				s_pkt = 1;
1538 		}
1539 
1540 		/*
1541 		 * We assume here that we will always receive the entire data block
1542 		 * which we are supposed to receive. Meaning, if we program RX to
1543 		 * receive 4K but we receive only 2K, we assume that's all we
1544 		 * should receive and we simply bounce the request back to the
1545 		 * gadget driver for further processing.
1546 		 */
1547 		req->request.actual += req->request.length - count;
1548 		dwc3_gadget_giveback(dep, req, status);
1549 		if (s_pkt)
1550 			break;
1551 		if ((event->status & DEPEVT_STATUS_LST) &&
1552 				(trb->ctrl & DWC3_TRB_CTRL_LST))
1553 			break;
1554 		if ((event->status & DEPEVT_STATUS_IOC) &&
1555 				(trb->ctrl & DWC3_TRB_CTRL_IOC))
1556 			break;
1557 	} while (1);
1558 
1559 	if ((event->status & DEPEVT_STATUS_IOC) &&
1560 			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1561 		return 0;
1562 	return 1;
1563 }
1564 
1565 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1566 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1567 		int start_new)
1568 {
1569 	unsigned		status = 0;
1570 	int			clean_busy;
1571 
1572 	if (event->status & DEPEVT_STATUS_BUSERR)
1573 		status = -ECONNRESET;
1574 
1575 	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1576 	if (clean_busy)
1577 		dep->flags &= ~DWC3_EP_BUSY;
1578 
1579 	/*
1580 	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1581 	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1582 	 */
1583 	if (dwc->revision < DWC3_REVISION_183A) {
1584 		u32		reg;
1585 		int		i;
1586 
1587 		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1588 			struct dwc3_ep	*dep = dwc->eps[i];
1589 
1590 			if (!(dep->flags & DWC3_EP_ENABLED))
1591 				continue;
1592 
1593 			if (!list_empty(&dep->req_queued))
1594 				return;
1595 		}
1596 
1597 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1598 		reg |= dwc->u1u2;
1599 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1600 
1601 		dwc->u1u2 = 0;
1602 	}
1603 }
1604 
1605 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1606 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1607 {
1608 	u32 uf, mask;
1609 
1610 	if (list_empty(&dep->request_list)) {
1611 		dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests\n",
1612 			dep->name);
1613 		return;
1614 	}
1615 
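	/*
	 * Round the (micro)frame number reported by the event down to a
	 * multiple of the endpoint interval and schedule the first transfer
	 * a few intervals into the future.
	 */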
1616 	mask = ~(dep->interval - 1);
1617 	uf = event->parameters & mask;
1618 	/* 4 micro frames in the future */
1619 	uf += dep->interval * 4;
1620 
1621 	__dwc3_gadget_kick_transfer(dep, uf, 1);
1622 }
1623 
1624 static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1625 		const struct dwc3_event_depevt *event)
1626 {
1627 	struct dwc3 *dwc = dep->dwc;
1628 	struct dwc3_event_depevt mod_ev = *event;
1629 
1630 	/*
1631 	 * We were asked to remove one request. It is possible that this
1632 	 * request and a few others were started together and have the same
1633 	 * transfer index. Since we stopped the whole endpoint we don't
1634 	 * know how many requests have already completed (but not yet been
1635 	 * reported) and how many could still complete later. We purge them
1636 	 * all until the end of the list.
1637 	 */
1638 	mod_ev.status = DEPEVT_STATUS_LST;
1639 	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1640 	dep->flags &= ~DWC3_EP_BUSY;
1641 	/* pending requests are ignored and are queued on XferNotReady */
1642 }
1643 
1644 static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1645 		const struct dwc3_event_depevt *event)
1646 {
1647 	u32 param = event->parameters;
1648 	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1649 
1650 	switch (cmd_type) {
1651 	case DWC3_DEPCMD_ENDTRANSFER:
1652 		dwc3_process_ep_cmd_complete(dep, event);
1653 		break;
1654 	case DWC3_DEPCMD_STARTTRANSFER:
1655 		dep->res_trans_idx = param & 0x7f;
1656 		break;
1657 	default:
1658 		printk(KERN_ERR "%s() unknown/unexpected type: %d\n",
1659 				__func__, cmd_type);
1660 		break;
1661 	}
1662 }
1663 
1664 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1665 		const struct dwc3_event_depevt *event)
1666 {
1667 	struct dwc3_ep		*dep;
1668 	u8			epnum = event->endpoint_number;
1669 
1670 	dep = dwc->eps[epnum];
1671 
1672 	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1673 			dwc3_ep_event_string(event->endpoint_event));
1674 
1675 	if (epnum == 0 || epnum == 1) {
1676 		dwc3_ep0_interrupt(dwc, event);
1677 		return;
1678 	}
1679 
1680 	switch (event->endpoint_event) {
1681 	case DWC3_DEPEVT_XFERCOMPLETE:
1682 		dep->res_trans_idx = 0;
1683 
1684 		if (usb_endpoint_xfer_isoc(dep->desc)) {
1685 			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1686 					dep->name);
1687 			return;
1688 		}
1689 
1690 		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1691 		break;
1692 	case DWC3_DEPEVT_XFERINPROGRESS:
1693 		if (!usb_endpoint_xfer_isoc(dep->desc)) {
1694 			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1695 					dep->name);
1696 			return;
1697 		}
1698 
1699 		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1700 		break;
1701 	case DWC3_DEPEVT_XFERNOTREADY:
1702 		if (usb_endpoint_xfer_isoc(dep->desc)) {
1703 			dwc3_gadget_start_isoc(dwc, dep, event);
1704 		} else {
1705 			int ret;
1706 
1707 			dev_vdbg(dwc->dev, "%s: reason %s\n",
1708 					dep->name, event->status &
1709 					DEPEVT_STATUS_TRANSFER_ACTIVE
1710 					? "Transfer Active"
1711 					: "Transfer Not Active");
1712 
1713 			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1714 			if (!ret || ret == -EBUSY)
1715 				return;
1716 
1717 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1718 					dep->name);
1719 		}
1720 
1721 		break;
1722 	case DWC3_DEPEVT_STREAMEVT:
1723 		if (!usb_endpoint_xfer_bulk(dep->desc)) {
1724 			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1725 					dep->name);
1726 			return;
1727 		}
1728 
1729 		switch (event->status) {
1730 		case DEPEVT_STREAMEVT_FOUND:
1731 			dev_vdbg(dwc->dev, "Stream %d found and started\n",
1732 					event->parameters);
1733 
1734 			break;
1735 		case DEPEVT_STREAMEVT_NOTFOUND:
1736 			/* FALLTHROUGH */
1737 		default:
1738 			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1739 		}
1740 		break;
1741 	case DWC3_DEPEVT_RXTXFIFOEVT:
1742 		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1743 		break;
1744 	case DWC3_DEPEVT_EPCMDCMPLT:
1745 		dwc3_ep_cmd_compl(dep, event);
1746 		break;
1747 	}
1748 }
1749 
1750 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1751 {
1752 	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1753 		spin_unlock(&dwc->lock);
1754 		dwc->gadget_driver->disconnect(&dwc->gadget);
1755 		spin_lock(&dwc->lock);
1756 	}
1757 }
1758 
1759 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1760 {
1761 	struct dwc3_ep *dep;
1762 	struct dwc3_gadget_ep_cmd_params params;
1763 	u32 cmd;
1764 	int ret;
1765 
1766 	dep = dwc->eps[epnum];
1767 
1768 	WARN_ON(!dep->res_trans_idx);
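	/*
	 * End Transfer must reference the transfer resource index handed
	 * back by Start Transfer, which is why the command is only issued
	 * when one has been recorded.
	 */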
1769 	if (dep->res_trans_idx) {
1770 		cmd = DWC3_DEPCMD_ENDTRANSFER;
1771 		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1772 		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1773 		memset(&params, 0, sizeof(params));
1774 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1775 		WARN_ON_ONCE(ret);
1776 		dep->res_trans_idx = 0;
1777 	}
1778 }
1779 
1780 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1781 {
1782 	u32 epnum;
1783 
1784 	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1785 		struct dwc3_ep *dep;
1786 
1787 		dep = dwc->eps[epnum];
1788 		if (!(dep->flags & DWC3_EP_ENABLED))
1789 			continue;
1790 
1791 		dwc3_remove_requests(dwc, dep);
1792 	}
1793 }
1794 
1795 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1796 {
1797 	u32 epnum;
1798 
1799 	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1800 		struct dwc3_ep *dep;
1801 		struct dwc3_gadget_ep_cmd_params params;
1802 		int ret;
1803 
1804 		dep = dwc->eps[epnum];
1805 
1806 		if (!(dep->flags & DWC3_EP_STALL))
1807 			continue;
1808 
1809 		dep->flags &= ~DWC3_EP_STALL;
1810 
1811 		memset(&params, 0, sizeof(params));
1812 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1813 				DWC3_DEPCMD_CLEARSTALL, &params);
1814 		WARN_ON_ONCE(ret);
1815 	}
1816 }
1817 
1818 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1819 {
1820 	dev_vdbg(dwc->dev, "%s\n", __func__);
1821 #if 0
1822 	XXX
1823 	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
1824 	enable it before we can disable it.
1825 
1826 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1827 	reg &= ~DWC3_DCTL_INITU1ENA;
1828 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1829 
1830 	reg &= ~DWC3_DCTL_INITU2ENA;
1831 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1832 #endif
1833 
1834 	dwc3_stop_active_transfers(dwc);
1835 	dwc3_disconnect_gadget(dwc);
1836 	dwc->start_config_issued = false;
1837 
1838 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
1839 	dwc->setup_packet_pending = false;
1840 }
1841 
1842 static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1843 {
1844 	u32			reg;
1845 
1846 	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1847 
1848 	if (on)
1849 		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1850 	else
1851 		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1852 
1853 	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1854 }
1855 
1856 static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1857 {
1858 	u32			reg;
1859 
1860 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1861 
1862 	if (on)
1863 		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1864 	else
1865 		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1866 
1867 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1868 }
1869 
1870 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1871 {
1872 	u32			reg;
1873 
1874 	dev_vdbg(dwc->dev, "%s\n", __func__);
1875 
1876 	/*
1877 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
1878 	 * would cause a missing Disconnect Event if there's a
1879 	 * pending Setup Packet in the FIFO.
1880 	 *
1881 	 * There's no suggested workaround on the official Bug
1882 	 * report, which states that "unless the driver/application
1883 	 * is doing any special handling of a disconnect event,
1884 	 * there is no functional issue".
1885 	 *
1886 	 * Unfortunately, it turns out that we _do_ some special
1887 	 * handling of a disconnect event, namely complete all
1888 	 * pending transfers, notify gadget driver of the
1889 	 * disconnection, and so on.
1890 	 *
1891 	 * Our suggested workaround is to follow the Disconnect
1892 	 * Event steps here, instead, based on a setup_packet_pending
1893 	 * flag. Such a flag gets set whenever we have an XferNotReady
1894 	 * event on EP0 and gets cleared on XferComplete for the
1895 	 * same endpoint.
1896 	 *
1897 	 * Refers to:
1898 	 *
1899 	 * STAR#9000466709: RTL: Device : Disconnect event not
1900 	 * generated if setup packet pending in FIFO
1901 	 */
1902 	if (dwc->revision < DWC3_REVISION_188A) {
1903 		if (dwc->setup_packet_pending)
1904 			dwc3_gadget_disconnect_interrupt(dwc);
1905 	}
1906 
1907 	/* after reset -> Default State */
1908 	dwc->dev_state = DWC3_DEFAULT_STATE;
1909 
1910 	/* Enable PHYs */
1911 	dwc3_gadget_usb2_phy_power(dwc, true);
1912 	dwc3_gadget_usb3_phy_power(dwc, true);
1913 
1914 	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
1915 		dwc3_disconnect_gadget(dwc);
1916 
1917 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1918 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
1919 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1920 	dwc->test_mode = false;
1921 
1922 	dwc3_stop_active_transfers(dwc);
1923 	dwc3_clear_stall_all_ep(dwc);
1924 	dwc->start_config_issued = false;
1925 
1926 	/* Reset device address to zero */
1927 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1928 	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
1929 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1930 }
1931 
1932 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1933 {
1934 	u32 reg;
1935 	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1936 
1937 	/*
1938 	 * We change the clock only at SuperSpeed, though it is not clear why
1939 	 * we would want to. Maybe it becomes part of the power saving plan.
1940 	 */
1941 
1942 	if (speed != DWC3_DSTS_SUPERSPEED)
1943 		return;
1944 
1945 	/*
1946 	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1947 	 * each time on Connect Done.
1948 	 */
1949 	if (!usb30_clock)
1950 		return;
1951 
1952 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1953 	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
1954 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1955 }
1956 
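/*
 * Once the connection speed is known only one PHY is needed: power down
 * the USB2 PHY when operating at SuperSpeed, and the USB3 (PIPE) PHY
 * when operating at High/Full/Low Speed.
 */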
1957 static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1958 {
1959 	switch (speed) {
1960 	case USB_SPEED_SUPER:
1961 		dwc3_gadget_usb2_phy_power(dwc, false);
1962 		break;
1963 	case USB_SPEED_HIGH:
1964 	case USB_SPEED_FULL:
1965 	case USB_SPEED_LOW:
1966 		dwc3_gadget_usb3_phy_power(dwc, false);
1967 		break;
1968 	}
1969 }
1970 
1971 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
1972 {
1973 	struct dwc3_gadget_ep_cmd_params params;
1974 	struct dwc3_ep		*dep;
1975 	int			ret;
1976 	u32			reg;
1977 	u8			speed;
1978 
1979 	dev_vdbg(dwc->dev, "%s\n", __func__);
1980 
1981 	memset(&params, 0x00, sizeof(params));
1982 
1983 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1984 	speed = reg & DWC3_DSTS_CONNECTSPD;
1985 	dwc->speed = speed;
1986 
1987 	dwc3_update_ram_clk_sel(dwc, speed);
1988 
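	/*
	 * Program ep0's wMaxPacketSize and the gadget speed according to the
	 * negotiated connection speed: 512 bytes at SuperSpeed, 64 at
	 * High/Full Speed and 8 at Low Speed.
	 */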
1989 	switch (speed) {
1990 	case DWC3_DCFG_SUPERSPEED:
1991 		/*
1992 		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
1993 		 * would cause a missing USB3 Reset event.
1994 		 *
1995 		 * In such situations, we should force a USB3 Reset
1996 		 * event by calling our dwc3_gadget_reset_interrupt()
1997 		 * routine.
1998 		 *
1999 		 * Refers to:
2000 		 *
2001 		 * STAR#9000483510: RTL: SS : USB3 reset event may
2002 		 * not be generated always when the link enters poll
2003 		 */
2004 		if (dwc->revision < DWC3_REVISION_190A)
2005 			dwc3_gadget_reset_interrupt(dwc);
2006 
2007 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2008 		dwc->gadget.ep0->maxpacket = 512;
2009 		dwc->gadget.speed = USB_SPEED_SUPER;
2010 		break;
2011 	case DWC3_DCFG_HIGHSPEED:
2012 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2013 		dwc->gadget.ep0->maxpacket = 64;
2014 		dwc->gadget.speed = USB_SPEED_HIGH;
2015 		break;
2016 	case DWC3_DCFG_FULLSPEED2:
2017 	case DWC3_DCFG_FULLSPEED1:
2018 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2019 		dwc->gadget.ep0->maxpacket = 64;
2020 		dwc->gadget.speed = USB_SPEED_FULL;
2021 		break;
2022 	case DWC3_DCFG_LOWSPEED:
2023 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2024 		dwc->gadget.ep0->maxpacket = 8;
2025 		dwc->gadget.speed = USB_SPEED_LOW;
2026 		break;
2027 	}
2028 
2029 	/* Disable unneeded PHY */
2030 	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
2031 
2032 	dep = dwc->eps[0];
2033 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2034 	if (ret) {
2035 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2036 		return;
2037 	}
2038 
2039 	dep = dwc->eps[1];
2040 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2041 	if (ret) {
2042 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2043 		return;
2044 	}
2045 
2046 	/*
2047 	 * Configure PHY via GUSB3PIPECTLn if required.
2048 	 *
2049 	 * Update GTXFIFOSIZn
2050 	 *
2051 	 * In both cases reset values should be sufficient.
2052 	 */
2053 }
2054 
2055 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2056 {
2057 	dev_vdbg(dwc->dev, "%s\n", __func__);
2058 
2059 	/*
2060 	 * TODO take core out of low power mode when that's
2061 	 * implemented.
2062 	 */
2063 
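	/* let the gadget driver know the bus is active again */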
2064 	dwc->gadget_driver->resume(&dwc->gadget);
2065 }
2066 
2067 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2068 		unsigned int evtinfo)
2069 {
2070 	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
2071 
2072 	/*
2073 	 * WORKAROUND: on DWC3 revisions <1.83a, depending on the link
2074 	 * partner, the USB session might do multiple entries into and exits
2075 	 * from low power states before a transfer takes place.
2076 	 *
2077 	 * Due to this problem, we might experience lower throughput. The
2078 	 * suggested workaround is to disable DCTL[12:9] bits if we're
2079 	 * transitioning from U1/U2 to U0 and enable those bits again
2080 	 * after a transfer completes and there are no pending transfers
2081 	 * on any of the enabled endpoints.
2082 	 *
2083 	 * This is the first half of that workaround.
2084 	 *
2085 	 * Refers to:
2086 	 *
2087 	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2088 	 * core send LGO_Ux entering U0
2089 	 */
2090 	if (dwc->revision < DWC3_REVISION_183A) {
2091 		if (next == DWC3_LINK_STATE_U0) {
2092 			u32	u1u2;
2093 			u32	reg;
2094 
2095 			switch (dwc->link_state) {
2096 			case DWC3_LINK_STATE_U1:
2097 			case DWC3_LINK_STATE_U2:
2098 				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2099 				u1u2 = reg & (DWC3_DCTL_INITU2ENA
2100 						| DWC3_DCTL_ACCEPTU2ENA
2101 						| DWC3_DCTL_INITU1ENA
2102 						| DWC3_DCTL_ACCEPTU1ENA);
2103 
2104 				if (!dwc->u1u2)
2105 					dwc->u1u2 = reg & u1u2;
2106 
2107 				reg &= ~u1u2;
2108 
2109 				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2110 				break;
2111 			default:
2112 				/* do nothing */
2113 				break;
2114 			}
2115 		}
2116 	}
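	/*
	 * The second half of the workaround above, i.e. restoring the U1/U2
	 * enable bits saved in dwc->u1u2, is done from the transfer
	 * completion path once no transfers are pending on any enabled
	 * endpoint.
	 */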
2117 
2118 	dwc->link_state = next;
2119 
2120 	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2121 }
2122 
2123 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2124 		const struct dwc3_event_devt *event)
2125 {
2126 	switch (event->type) {
2127 	case DWC3_DEVICE_EVENT_DISCONNECT:
2128 		dwc3_gadget_disconnect_interrupt(dwc);
2129 		break;
2130 	case DWC3_DEVICE_EVENT_RESET:
2131 		dwc3_gadget_reset_interrupt(dwc);
2132 		break;
2133 	case DWC3_DEVICE_EVENT_CONNECT_DONE:
2134 		dwc3_gadget_conndone_interrupt(dwc);
2135 		break;
2136 	case DWC3_DEVICE_EVENT_WAKEUP:
2137 		dwc3_gadget_wakeup_interrupt(dwc);
2138 		break;
2139 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2140 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2141 		break;
2142 	case DWC3_DEVICE_EVENT_EOPF:
2143 		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2144 		break;
2145 	case DWC3_DEVICE_EVENT_SOF:
2146 		dev_vdbg(dwc->dev, "Start of Frame\n");
2147 		break;
2148 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2149 		dev_vdbg(dwc->dev, "Erratic Error\n");
2150 		break;
2151 	case DWC3_DEVICE_EVENT_CMD_CMPL:
2152 		dev_vdbg(dwc->dev, "Command Complete\n");
2153 		break;
2154 	case DWC3_DEVICE_EVENT_OVERFLOW:
2155 		dev_vdbg(dwc->dev, "Overflow\n");
2156 		break;
2157 	default:
2158 		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2159 	}
2160 }
2161 
2162 static void dwc3_process_event_entry(struct dwc3 *dwc,
2163 		const union dwc3_event *event)
2164 {
2165 	/* Endpoint IRQ, handle it and return early */
2166 	if (event->type.is_devspec == 0) {
2167 		/* depevt */
2168 		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2169 	}
2170 
2171 	switch (event->type.type) {
2172 	case DWC3_EVENT_TYPE_DEV:
2173 		dwc3_gadget_interrupt(dwc, &event->devt);
2174 		break;
2175 	/* REVISIT what to do with Carkit and I2C events ? */
2176 	default:
2177 		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2178 	}
2179 }
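/*
 * For reference, every event consumed above is a single 32-bit word. Its
 * lowest bit selects between endpoint events (0) and device/"devspec"
 * events (1); the remaining bits carry the endpoint number or device
 * event type plus event-specific status. See the dwc3_event union in
 * core.h for the authoritative layout.
 */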
2180 
2181 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2182 {
2183 	struct dwc3_event_buffer *evt;
2184 	int left;
2185 	u32 count;
2186 
2187 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2188 	count &= DWC3_GEVNTCOUNT_MASK;
2189 	if (!count)
2190 		return IRQ_NONE;
2191 
2192 	evt = dwc->ev_buffs[buf];
2193 	left = count;
2194 
2195 	while (left > 0) {
2196 		union dwc3_event event;
2197 
2198 		event.raw = *(u32 *) (evt->buf + evt->lpos);
2199 
2200 		dwc3_process_event_entry(dwc, &event);
2201 		/*
2202 		 * XXX: we wrap around correctly to the next entry as almost all
2203 		 * entries are 4 bytes in size. There is one 12-byte entry, which
2204 		 * is a regular entry followed by 8 bytes of data. It is not yet
2205 		 * clear how such an entry is laid out when it lands right at the
2206 		 * buffer boundary; worry about that once we try to handle it.
2207 		 */
2208 		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2209 		left -= 4;
2210 
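		/* acknowledge the 4 bytes we just consumed so the core can reuse them */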
2211 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2212 	}
2213 
2214 	return IRQ_HANDLED;
2215 }
2216 
2217 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2218 {
2219 	struct dwc3			*dwc = _dwc;
2220 	int				i;
2221 	irqreturn_t			ret = IRQ_NONE;
2222 
2223 	spin_lock(&dwc->lock);
2224 
2225 	for (i = 0; i < dwc->num_event_buffers; i++) {
2226 		irqreturn_t status;
2227 
2228 		status = dwc3_process_event_buf(dwc, i);
2229 		if (status == IRQ_HANDLED)
2230 			ret = status;
2231 	}
2232 
2233 	spin_unlock(&dwc->lock);
2234 
2235 	return ret;
2236 }
2237 
2238 /**
2239  * dwc3_gadget_init - Initializes gadget related registers
2240  * @dwc: pointer to our controller context structure
2241  *
2242  * Returns 0 on success, otherwise a negative errno.
2243  */
2244 int __devinit dwc3_gadget_init(struct dwc3 *dwc)
2245 {
2246 	u32					reg;
2247 	int					ret;
2248 	int					irq;
2249 
2250 	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2251 			&dwc->ctrl_req_addr, GFP_KERNEL);
2252 	if (!dwc->ctrl_req) {
2253 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2254 		ret = -ENOMEM;
2255 		goto err0;
2256 	}
2257 
2258 	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2259 			&dwc->ep0_trb_addr, GFP_KERNEL);
2260 	if (!dwc->ep0_trb) {
2261 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2262 		ret = -ENOMEM;
2263 		goto err1;
2264 	}
2265 
2266 	dwc->setup_buf = kzalloc(sizeof(*dwc->setup_buf) * 2,
2267 			GFP_KERNEL);
2268 	if (!dwc->setup_buf) {
2269 		dev_err(dwc->dev, "failed to allocate setup buffer\n");
2270 		ret = -ENOMEM;
2271 		goto err2;
2272 	}
2273 
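	/*
	 * Bounce buffer used by the ep0 code; 512 bytes matches ep0's largest
	 * possible packet size (SuperSpeed).
	 */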
2274 	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2275 			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
2276 	if (!dwc->ep0_bounce) {
2277 		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2278 		ret = -ENOMEM;
2279 		goto err3;
2280 	}
2281 
2282 	dev_set_name(&dwc->gadget.dev, "gadget");
2283 
2284 	dwc->gadget.ops			= &dwc3_gadget_ops;
2285 	dwc->gadget.max_speed		= USB_SPEED_SUPER;
2286 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2287 	dwc->gadget.dev.parent		= dwc->dev;
2288 	dwc->gadget.sg_supported	= true;
2289 
2290 	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2291 
2292 	dwc->gadget.dev.dma_parms	= dwc->dev->dma_parms;
2293 	dwc->gadget.dev.dma_mask	= dwc->dev->dma_mask;
2294 	dwc->gadget.dev.release		= dwc3_gadget_release;
2295 	dwc->gadget.name		= "dwc3-gadget";
2296 
2297 	/*
2298 	 * REVISIT: Here we should clear all pending IRQs to be
2299 	 * sure we're starting from a well-known state.
2300 	 */
2301 
2302 	ret = dwc3_gadget_init_endpoints(dwc);
2303 	if (ret)
2304 		goto err4;
2305 
2306 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2307 
2308 	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2309 			"dwc3", dwc);
2310 	if (ret) {
2311 		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2312 				irq, ret);
2313 		goto err5;
2314 	}
2315 
2316 	/* Enable all but Start and End of Frame IRQs */
2317 	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2318 			DWC3_DEVTEN_EVNTOVERFLOWEN |
2319 			DWC3_DEVTEN_CMDCMPLTEN |
2320 			DWC3_DEVTEN_ERRTICERREN |
2321 			DWC3_DEVTEN_WKUPEVTEN |
2322 			DWC3_DEVTEN_ULSTCNGEN |
2323 			DWC3_DEVTEN_CONNECTDONEEN |
2324 			DWC3_DEVTEN_USBRSTEN |
2325 			DWC3_DEVTEN_DISCONNEVTEN);
2326 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2327 
2328 	ret = device_register(&dwc->gadget.dev);
2329 	if (ret) {
2330 		dev_err(dwc->dev, "failed to register gadget device\n");
2331 		put_device(&dwc->gadget.dev);
2332 		goto err6;
2333 	}
2334 
2335 	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2336 	if (ret) {
2337 		dev_err(dwc->dev, "failed to register udc\n");
2338 		goto err7;
2339 	}
2340 
2341 	return 0;
2342 
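	/*
	 * Error unwinding: each label below releases what was successfully
	 * set up before the failing step, in reverse order of acquisition.
	 */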
2343 err7:
2344 	device_unregister(&dwc->gadget.dev);
2345 
2346 err6:
2347 	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2348 	free_irq(irq, dwc);
2349 
2350 err5:
2351 	dwc3_gadget_free_endpoints(dwc);
2352 
2353 err4:
2354 	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2355 			dwc->ep0_bounce_addr);
2356 
2357 err3:
2358 	kfree(dwc->setup_buf);
2359 
2360 err2:
2361 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2362 			dwc->ep0_trb, dwc->ep0_trb_addr);
2363 
2364 err1:
2365 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2366 			dwc->ctrl_req, dwc->ctrl_req_addr);
2367 
2368 err0:
2369 	return ret;
2370 }
2371 
2372 void dwc3_gadget_exit(struct dwc3 *dwc)
2373 {
2374 	int			irq;
2375 
2376 	usb_del_gadget_udc(&dwc->gadget);
2377 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2378 
2379 	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2380 	free_irq(irq, dwc);
2381 
2382 	dwc3_gadget_free_endpoints(dwc);
2383 
2384 	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2385 			dwc->ep0_bounce_addr);
2386 
2387 	kfree(dwc->setup_buf);
2388 
2389 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2390 			dwc->ep0_trb, dwc->ep0_trb_addr);
2391 
2392 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2393 			dwc->ctrl_req, dwc->ctrl_req_addr);
2394 
2395 	device_unregister(&dwc->gadget.dev);
2396 }
2397