1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * core.c - DesignWare USB3 DRD Controller Core file
4 *
5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 */
10
11 #include <linux/clk.h>
12 #include <linux/version.h>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/interrupt.h>
20 #include <linux/ioport.h>
21 #include <linux/io.h>
22 #include <linux/list.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/of.h>
26 #include <linux/of_graph.h>
27 #include <linux/acpi.h>
28 #include <linux/pinctrl/consumer.h>
29 #include <linux/reset.h>
30 #include <linux/bitfield.h>
31
32 #include <linux/usb/ch9.h>
33 #include <linux/usb/gadget.h>
34 #include <linux/usb/of.h>
35 #include <linux/usb/otg.h>
36
37 #include "core.h"
38 #include "gadget.h"
39 #include "io.h"
40
41 #include "debug.h"
42
43 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
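/*
 * Note (assumption based on how this driver normally sets up runtime PM):
 * this value is passed to pm_runtime_set_autosuspend_delay() during probe,
 * so the controller is only runtime-suspended after ~5 s of inactivity.
 */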
44
45 /**
46 * dwc3_get_dr_mode - Validates and sets dr_mode
47 * @dwc: pointer to our context structure
48 */
49 static int dwc3_get_dr_mode(struct dwc3 *dwc)
50 {
51 enum usb_dr_mode mode;
52 struct device *dev = dwc->dev;
53 unsigned int hw_mode;
54
55 if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
56 dwc->dr_mode = USB_DR_MODE_OTG;
57
58 mode = dwc->dr_mode;
59 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
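/*
 * GHWPARAMS0.MODE reflects how the controller was synthesized:
 * 0 = device-only (gadget), 1 = host-only, 2 = dual-role (DRD),
 * matching the DWC3_GHWPARAMS0_MODE_* cases below.
 */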
60
61 switch (hw_mode) {
62 case DWC3_GHWPARAMS0_MODE_GADGET:
63 if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
64 dev_err(dev,
65 "Controller does not support host mode.\n");
66 return -EINVAL;
67 }
68 mode = USB_DR_MODE_PERIPHERAL;
69 break;
70 case DWC3_GHWPARAMS0_MODE_HOST:
71 if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
72 dev_err(dev,
73 "Controller does not support device mode.\n");
74 return -EINVAL;
75 }
76 mode = USB_DR_MODE_HOST;
77 break;
78 default:
79 if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
80 mode = USB_DR_MODE_HOST;
81 else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
82 mode = USB_DR_MODE_PERIPHERAL;
83
84 /*
85 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
86 * mode. If the controller supports DRD but the dr_mode is not
87 * specified or set to OTG, then set the mode to peripheral.
88 */
89 if (mode == USB_DR_MODE_OTG && !dwc->edev &&
90 (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
91 !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
92 !DWC3_VER_IS_PRIOR(DWC3, 330A))
93 mode = USB_DR_MODE_PERIPHERAL;
94 }
95
96 if (mode != dwc->dr_mode) {
97 dev_warn(dev,
98 "Configuration mismatch. dr_mode forced to %s\n",
99 mode == USB_DR_MODE_HOST ? "host" : "gadget");
100
101 dwc->dr_mode = mode;
102 }
103
104 return 0;
105 }
106
107 void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
108 {
109 u32 reg;
110
111 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
112 if (enable && !dwc->dis_u3_susphy_quirk)
113 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
114 else
115 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
116
117 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
118
119 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
120 if (enable && !dwc->dis_u2_susphy_quirk)
121 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
122 else
123 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
124
125 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
126 }
127
128 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
129 {
130 unsigned int hw_mode;
131 u32 reg;
132
133 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
134
135 /*
136 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
137 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
138 * and they can be set after core initialization.
139 */
140 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
141 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
142 if (DWC3_GCTL_PRTCAP(reg) != mode)
143 dwc3_enable_susphy(dwc, false);
144 }
145
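/*
 * PRTCAPDIR is a two-bit field; masking with
 * DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG) clears the whole field,
 * since OTG (0x3) is its all-ones value.
 */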
146 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
147 reg |= DWC3_GCTL_PRTCAPDIR(mode);
148 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
149
150 dwc->current_dr_role = mode;
151 }
152
153 static void __dwc3_set_mode(struct work_struct *work)
154 {
155 struct dwc3 *dwc = work_to_dwc(work);
156 unsigned long flags;
157 int ret;
158 u32 reg;
159 u32 desired_dr_role;
160
161 mutex_lock(&dwc->mutex);
162 spin_lock_irqsave(&dwc->lock, flags);
163 desired_dr_role = dwc->desired_dr_role;
164 spin_unlock_irqrestore(&dwc->lock, flags);
165
166 pm_runtime_get_sync(dwc->dev);
167
168 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
169 dwc3_otg_update(dwc, 0);
170
171 if (!desired_dr_role)
172 goto out;
173
174 if (desired_dr_role == dwc->current_dr_role)
175 goto out;
176
177 if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
178 goto out;
179
180 switch (dwc->current_dr_role) {
181 case DWC3_GCTL_PRTCAP_HOST:
182 dwc3_host_exit(dwc);
183 break;
184 case DWC3_GCTL_PRTCAP_DEVICE:
185 dwc3_gadget_exit(dwc);
186 dwc3_event_buffers_cleanup(dwc);
187 break;
188 case DWC3_GCTL_PRTCAP_OTG:
189 dwc3_otg_exit(dwc);
190 spin_lock_irqsave(&dwc->lock, flags);
191 dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
192 spin_unlock_irqrestore(&dwc->lock, flags);
193 dwc3_otg_update(dwc, 1);
194 break;
195 default:
196 break;
197 }
198
199 /*
200 * When current_dr_role is not set, there's no role switching.
201 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
202 */
203 if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
204 DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
205 desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
206 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
207 reg |= DWC3_GCTL_CORESOFTRESET;
208 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
209
210 /*
211 * Wait for internal clocks to synchronize. DWC_usb31 and
212 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
213 * keep it consistent across different IPs, let's wait up to
214 * 100ms before clearing GCTL.CORESOFTRESET.
215 */
216 msleep(100);
217
218 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
219 reg &= ~DWC3_GCTL_CORESOFTRESET;
220 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
221 }
222
223 spin_lock_irqsave(&dwc->lock, flags);
224
225 dwc3_set_prtcap(dwc, desired_dr_role, false);
226
227 spin_unlock_irqrestore(&dwc->lock, flags);
228
229 switch (desired_dr_role) {
230 case DWC3_GCTL_PRTCAP_HOST:
231 ret = dwc3_host_init(dwc);
232 if (ret) {
233 dev_err(dwc->dev, "failed to initialize host\n");
234 } else {
235 if (dwc->usb2_phy)
236 otg_set_vbus(dwc->usb2_phy->otg, true);
237 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
238 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
239 if (dwc->dis_split_quirk) {
240 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
241 reg |= DWC3_GUCTL3_SPLITDISABLE;
242 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
243 }
244 }
245 break;
246 case DWC3_GCTL_PRTCAP_DEVICE:
247 dwc3_core_soft_reset(dwc);
248
249 dwc3_event_buffers_setup(dwc);
250
251 if (dwc->usb2_phy)
252 otg_set_vbus(dwc->usb2_phy->otg, false);
253 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
254 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
255
256 ret = dwc3_gadget_init(dwc);
257 if (ret)
258 dev_err(dwc->dev, "failed to initialize peripheral\n");
259 break;
260 case DWC3_GCTL_PRTCAP_OTG:
261 dwc3_otg_init(dwc);
262 dwc3_otg_update(dwc, 0);
263 break;
264 default:
265 break;
266 }
267
268 out:
269 pm_runtime_mark_last_busy(dwc->dev);
270 pm_runtime_put_autosuspend(dwc->dev);
271 mutex_unlock(&dwc->mutex);
272 }
273
274 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
275 {
276 unsigned long flags;
277
278 if (dwc->dr_mode != USB_DR_MODE_OTG)
279 return;
280
281 spin_lock_irqsave(&dwc->lock, flags);
282 dwc->desired_dr_role = mode;
283 spin_unlock_irqrestore(&dwc->lock, flags);
284
285 queue_work(system_freezable_wq, &dwc->drd_work);
286 }
287
288 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
289 {
290 struct dwc3 *dwc = dep->dwc;
291 u32 reg;
292
293 dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
294 DWC3_GDBGFIFOSPACE_NUM(dep->number) |
295 DWC3_GDBGFIFOSPACE_TYPE(type));
296
297 reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);
298
299 return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
300 }
301
302 /**
303 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
304 * @dwc: pointer to our context structure
305 */
306 int dwc3_core_soft_reset(struct dwc3 *dwc)
307 {
308 u32 reg;
309 int retries = 1000;
310
311 /*
312 * We're resetting only the device side because, if we're in host mode,
313 * the xHCI driver will reset the host block. If dwc3 was configured for
314 * host-only mode, then we can return early.
315 */
316 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
317 return 0;
318
319 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
320 reg |= DWC3_DCTL_CSFTRST;
321 reg &= ~DWC3_DCTL_RUN_STOP;
322 dwc3_gadget_dctl_write_safe(dwc, reg);
323
324 /*
325 * For DWC_usb31 controllers 1.90a and later, the DCTL.CSFTRST bit
326 * is cleared only after all the clocks are synchronized. This can
327 * take a little more than 50ms. Poll at a 20ms interval, up to
328 * 10 times, instead.
329 */
330 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
331 retries = 10;
332
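/*
 * Worst case the loop below waits 10 * 20ms = 200ms on newer IP, or
 * about 1ms (1000 * 1us) on older DWC_usb3 cores, before giving up.
 */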
333 do {
334 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
335 if (!(reg & DWC3_DCTL_CSFTRST))
336 goto done;
337
338 if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
339 msleep(20);
340 else
341 udelay(1);
342 } while (--retries);
343
344 dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
345 return -ETIMEDOUT;
346
347 done:
348 /*
349 * For DWC_usb31 controllers 1.80a and prior, once the DCTL.CSFTRST bit
350 * is cleared, we must wait at least 50ms before accessing the PHY
351 * domain (synchronization delay).
352 */
353 if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
354 msleep(50);
355
356 return 0;
357 }
358
359 /*
360 * dwc3_frame_length_adjustment - Adjusts frame length if required
361 * @dwc: Pointer to our controller context structure
362 */
363 static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
364 {
365 u32 reg;
366 u32 dft;
367
368 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
369 return;
370
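/* dwc->fladj is read from the "snps,quirk-frame-length-adjustment" property. */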
371 if (dwc->fladj == 0)
372 return;
373
374 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
375 dft = reg & DWC3_GFLADJ_30MHZ_MASK;
376 if (dft != dwc->fladj) {
377 reg &= ~DWC3_GFLADJ_30MHZ_MASK;
378 reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
379 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
380 }
381 }
382
383 /**
384 * dwc3_ref_clk_period - Reference clock period configuration
385 * The default reference clock period depends on the hardware
386 * configuration. For systems with a reference clock that differs
387 * from the default, this sets the clock period in the DWC3_GUCTL
388 * register.
389 * @dwc: Pointer to our controller context structure
390 */
391 static void dwc3_ref_clk_period(struct dwc3 *dwc)
392 {
393 unsigned long period;
394 unsigned long fladj;
395 unsigned long decr;
396 unsigned long rate;
397 u32 reg;
398
399 if (dwc->ref_clk) {
400 rate = clk_get_rate(dwc->ref_clk);
401 if (!rate)
402 return;
403 period = NSEC_PER_SEC / rate;
404 } else if (dwc->ref_clk_per) {
405 period = dwc->ref_clk_per;
406 rate = NSEC_PER_SEC / period;
407 } else {
408 return;
409 }
410
411 reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
412 reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
413 reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
414 dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
415
416 if (DWC3_VER_IS_PRIOR(DWC3, 250A))
417 return;
418
419 /*
420 * The calculation below is
421 *
422 * 125000 * (NSEC_PER_SEC / (rate * period) - 1)
423 *
424 * but rearranged for fixed-point arithmetic. The division must be
425 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
426 * neither does rate * period).
427 *
428 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
429 * nanoseconds of error caused by the truncation which happened during
430 * the division when calculating rate or period (whichever one was
431 * derived from the other). We first calculate the relative error, then
432 * scale it to units of 8 ppm.
433 */
434 fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
435 fladj -= 125000;
436
437 /*
438 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
439 */
440 decr = 480000000 / rate;
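/*
 * Worked example (assuming a hypothetical 19.2 MHz ref_clk, i.e.
 * period = 52 ns after truncation): 125000 * 1e9 / (19200000 * 52)
 * = 125200, so fladj = 200; decr = 480000000 / 19200000 = 25, giving
 * 240MHZDECR = 12 and 240MHZDECR_PLS1 = 1.
 */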
441
442 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
443 reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
444 & ~DWC3_GFLADJ_240MHZDECR
445 & ~DWC3_GFLADJ_240MHZDECR_PLS1;
446 reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
447 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
448 | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
449
450 if (dwc->gfladj_refclk_lpm_sel)
451 reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
452
453 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
454 }
455
456 /**
457 * dwc3_free_one_event_buffer - Frees one event buffer
458 * @dwc: Pointer to our controller context structure
459 * @evt: Pointer to event buffer to be freed
460 */
461 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
462 struct dwc3_event_buffer *evt)
463 {
464 dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
465 }
466
467 /**
468 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
469 * @dwc: Pointer to our controller context structure
470 * @length: size of the event buffer
471 *
472 * Returns a pointer to the allocated event buffer structure on success
473 * otherwise ERR_PTR(errno).
474 */
475 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
476 unsigned int length)
477 {
478 struct dwc3_event_buffer *evt;
479
480 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
481 if (!evt)
482 return ERR_PTR(-ENOMEM);
483
484 evt->dwc = dwc;
485 evt->length = length;
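/*
 * evt->cache is a plain (non-DMA) shadow of the event buffer; the
 * interrupt handler copies events out of the DMA buffer into it
 * before they are processed (see the event handling in gadget.c).
 */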
486 evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
487 if (!evt->cache)
488 return ERR_PTR(-ENOMEM);
489
490 evt->buf = dma_alloc_coherent(dwc->sysdev, length,
491 &evt->dma, GFP_KERNEL);
492 if (!evt->buf)
493 return ERR_PTR(-ENOMEM);
494
495 return evt;
496 }
497
498 /**
499 * dwc3_free_event_buffers - frees all allocated event buffers
500 * @dwc: Pointer to our controller context structure
501 */
502 static void dwc3_free_event_buffers(struct dwc3 *dwc)
503 {
504 struct dwc3_event_buffer *evt;
505
506 evt = dwc->ev_buf;
507 if (evt)
508 dwc3_free_one_event_buffer(dwc, evt);
509 }
510
511 /**
512 * dwc3_alloc_event_buffers - Allocates one event buffer of size @length
513 * @dwc: pointer to our controller context structure
514 * @length: size of event buffer
515 *
516 * Returns 0 on success otherwise negative errno. In the error case, dwc
517 * may contain some buffers that were allocated but not all that were requested.
518 */
519 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
520 {
521 struct dwc3_event_buffer *evt;
522 unsigned int hw_mode;
523
524 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
525 if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
526 dwc->ev_buf = NULL;
527 return 0;
528 }
529
530 evt = dwc3_alloc_one_event_buffer(dwc, length);
531 if (IS_ERR(evt)) {
532 dev_err(dwc->dev, "can't allocate event buffer\n");
533 return PTR_ERR(evt);
534 }
535 dwc->ev_buf = evt;
536
537 return 0;
538 }
539
540 /**
541 * dwc3_event_buffers_setup - setup our allocated event buffers
542 * @dwc: pointer to our controller context structure
543 *
544 * Returns 0 on success otherwise negative errno.
545 */
546 int dwc3_event_buffers_setup(struct dwc3 *dwc)
547 {
548 struct dwc3_event_buffer *evt;
549 u32 reg;
550
551 if (!dwc->ev_buf)
552 return 0;
553
554 evt = dwc->ev_buf;
555 evt->lpos = 0;
556 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
557 lower_32_bits(evt->dma));
558 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
559 upper_32_bits(evt->dma));
560 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
561 DWC3_GEVNTSIZ_SIZE(evt->length));
562
563 /* Clear any stale event */
564 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
565 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
566 return 0;
567 }
568
569 void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
570 {
571 struct dwc3_event_buffer *evt;
572 u32 reg;
573
574 if (!dwc->ev_buf)
575 return;
576 /*
577 * Exynos platforms may not be able to access the event buffer if the
578 * controller failed to halt on dwc3_core_exit().
579 */
580 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
581 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
582 return;
583
584 evt = dwc->ev_buf;
585
586 evt->lpos = 0;
587
588 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
589 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
590 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
591 | DWC3_GEVNTSIZ_SIZE(0));
592
593 /* Clear any stale event */
594 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
595 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
596 }
597
598 static void dwc3_core_num_eps(struct dwc3 *dwc)
599 {
600 struct dwc3_hwparams *parms = &dwc->hwparams;
601
602 dwc->num_eps = DWC3_NUM_EPS(parms);
603 }
604
605 static void dwc3_cache_hwparams(struct dwc3 *dwc)
606 {
607 struct dwc3_hwparams *parms = &dwc->hwparams;
608
609 parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
610 parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
611 parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
612 parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
613 parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
614 parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
615 parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
616 parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
617 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
618
619 if (DWC3_IP_IS(DWC32))
620 parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
621 }
622
623 static int dwc3_core_ulpi_init(struct dwc3 *dwc)
624 {
625 int intf;
626 int ret = 0;
627
628 intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
629
630 if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
631 (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
632 dwc->hsphy_interface &&
633 !strncmp(dwc->hsphy_interface, "ulpi", 4)))
634 ret = dwc3_ulpi_init(dwc);
635
636 return ret;
637 }
638
639 /**
640 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
641 * @dwc: Pointer to our controller context structure
642 *
643 * Returns 0 on success. The USB PHY interfaces are configured but not
644 * initialized. The PHY interfaces and the PHYs get initialized together with
645 * the core in dwc3_core_init.
646 */
647 static int dwc3_phy_setup(struct dwc3 *dwc)
648 {
649 u32 reg;
650
651 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
652
653 /*
654 * Make sure UX_EXIT_PX is cleared as that causes issues with some
655 * PHYs. Also, this bit is not supposed to be used in normal operation.
656 */
657 reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
658
659 /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
660 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
661
662 if (dwc->u2ss_inp3_quirk)
663 reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
664
665 if (dwc->dis_rxdet_inp3_quirk)
666 reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;
667
668 if (dwc->req_p1p2p3_quirk)
669 reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
670
671 if (dwc->del_p1p2p3_quirk)
672 reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;
673
674 if (dwc->del_phy_power_chg_quirk)
675 reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;
676
677 if (dwc->lfps_filter_quirk)
678 reg |= DWC3_GUSB3PIPECTL_LFPSFILT;
679
680 if (dwc->rx_detect_poll_quirk)
681 reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;
682
683 if (dwc->tx_de_emphasis_quirk)
684 reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
685
686 if (dwc->dis_del_phy_power_chg_quirk)
687 reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
688
689 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
690
691 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
692
693 /* Select the HS PHY interface */
694 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
695 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
696 if (dwc->hsphy_interface &&
697 !strncmp(dwc->hsphy_interface, "utmi", 4)) {
698 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
699 break;
700 } else if (dwc->hsphy_interface &&
701 !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
702 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
703 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
704 } else {
705 /* Relying on default value. */
706 if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
707 break;
708 }
709 fallthrough;
710 case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
711 default:
712 break;
713 }
714
715 switch (dwc->hsphy_mode) {
716 case USBPHY_INTERFACE_MODE_UTMI:
717 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
718 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
719 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
720 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
721 break;
722 case USBPHY_INTERFACE_MODE_UTMIW:
723 reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
724 DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
725 reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
726 DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
727 break;
728 default:
729 break;
730 }
731
732 /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
733 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
734
735 if (dwc->dis_enblslpm_quirk)
736 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
737 else
738 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
739
740 if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
741 reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
742
743 /*
744 * Some ULPI USB PHYs do not support an internal VBUS supply and only
745 * drive the CPEN pin when the ULPI DRVVBUSEXTERNAL bit of the OTG_CTRL
746 * register is configured. Setting the ULPIEXTVBUSDRV bit[17] of the
747 * GUSB2PHYCFG register makes the controller configure the USB2 PHY
748 * to drive VBUS with an external supply.
749 */
750 if (dwc->ulpi_ext_vbus_drv)
751 reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;
752
753 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
754
755 return 0;
756 }
757
758 static int dwc3_phy_init(struct dwc3 *dwc)
759 {
760 int ret;
761
762 usb_phy_init(dwc->usb2_phy);
763 usb_phy_init(dwc->usb3_phy);
764
765 ret = phy_init(dwc->usb2_generic_phy);
766 if (ret < 0)
767 goto err_shutdown_usb3_phy;
768
769 ret = phy_init(dwc->usb3_generic_phy);
770 if (ret < 0)
771 goto err_exit_usb2_phy;
772
773 return 0;
774
775 err_exit_usb2_phy:
776 phy_exit(dwc->usb2_generic_phy);
777 err_shutdown_usb3_phy:
778 usb_phy_shutdown(dwc->usb3_phy);
779 usb_phy_shutdown(dwc->usb2_phy);
780
781 return ret;
782 }
783
784 static void dwc3_phy_exit(struct dwc3 *dwc)
785 {
786 phy_exit(dwc->usb3_generic_phy);
787 phy_exit(dwc->usb2_generic_phy);
788
789 usb_phy_shutdown(dwc->usb3_phy);
790 usb_phy_shutdown(dwc->usb2_phy);
791 }
792
793 static int dwc3_phy_power_on(struct dwc3 *dwc)
794 {
795 int ret;
796
797 usb_phy_set_suspend(dwc->usb2_phy, 0);
798 usb_phy_set_suspend(dwc->usb3_phy, 0);
799
800 ret = phy_power_on(dwc->usb2_generic_phy);
801 if (ret < 0)
802 goto err_suspend_usb3_phy;
803
804 ret = phy_power_on(dwc->usb3_generic_phy);
805 if (ret < 0)
806 goto err_power_off_usb2_phy;
807
808 /*
809 * For DWC_usb3.0 versions above 1.94a, it is recommended to set
810 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
811 * coreConsultant configuration, so the default value is '0' when the
812 * core is reset. The application needs to set them to '1' after core
813 * initialization completes.
814 *
815 * Certain PHYs must be in the P0 power state during initialization.
816 * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear
817 * prior to phy init so the PHY remains in P0.
818 *
819 * After phy initialization, some phy operations can only be executed
820 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
821 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
822 * blocking phy ops.
823 */
824 if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
825 dwc3_enable_susphy(dwc, true);
826
827 return 0;
828
829 err_power_off_usb2_phy:
830 phy_power_off(dwc->usb2_generic_phy);
831 err_suspend_usb3_phy:
832 usb_phy_set_suspend(dwc->usb3_phy, 1);
833 usb_phy_set_suspend(dwc->usb2_phy, 1);
834
835 return ret;
836 }
837
838 static void dwc3_phy_power_off(struct dwc3 *dwc)
839 {
840 phy_power_off(dwc->usb3_generic_phy);
841 phy_power_off(dwc->usb2_generic_phy);
842
843 usb_phy_set_suspend(dwc->usb3_phy, 1);
844 usb_phy_set_suspend(dwc->usb2_phy, 1);
845 }
846
847 static int dwc3_clk_enable(struct dwc3 *dwc)
848 {
849 int ret;
850
851 ret = clk_prepare_enable(dwc->bus_clk);
852 if (ret)
853 return ret;
854
855 ret = clk_prepare_enable(dwc->ref_clk);
856 if (ret)
857 goto disable_bus_clk;
858
859 ret = clk_prepare_enable(dwc->susp_clk);
860 if (ret)
861 goto disable_ref_clk;
862
863 return 0;
864
865 disable_ref_clk:
866 clk_disable_unprepare(dwc->ref_clk);
867 disable_bus_clk:
868 clk_disable_unprepare(dwc->bus_clk);
869 return ret;
870 }
871
872 static void dwc3_clk_disable(struct dwc3 *dwc)
873 {
874 clk_disable_unprepare(dwc->susp_clk);
875 clk_disable_unprepare(dwc->ref_clk);
876 clk_disable_unprepare(dwc->bus_clk);
877 }
878
879 static void dwc3_core_exit(struct dwc3 *dwc)
880 {
881 dwc3_event_buffers_cleanup(dwc);
882 dwc3_phy_power_off(dwc);
883 dwc3_phy_exit(dwc);
884 dwc3_clk_disable(dwc);
885 reset_control_assert(dwc->reset);
886 }
887
888 static bool dwc3_core_is_valid(struct dwc3 *dwc)
889 {
890 u32 reg;
891
892 reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
893 dwc->ip = DWC3_GSNPS_ID(reg);
894
895 /* This should read as U3 followed by revision number */
896 if (DWC3_IP_IS(DWC3)) {
897 dwc->revision = reg;
898 } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
899 dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
900 dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
901 } else {
902 return false;
903 }
904
905 return true;
906 }
907
908 static void dwc3_core_setup_global_control(struct dwc3 *dwc)
909 {
910 unsigned int power_opt;
911 unsigned int hw_mode;
912 u32 reg;
913
914 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
915 reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
916 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
917 power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
918
919 switch (power_opt) {
920 case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
921 /**
922 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
923 * issue which would cause xHCI compliance tests to fail.
924 *
925 * Because of that we cannot enable clock gating on such
926 * configurations.
927 *
928 * Refers to:
929 *
930 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
931 * SOF/ITP Mode Used
932 */
933 if ((dwc->dr_mode == USB_DR_MODE_HOST ||
934 dwc->dr_mode == USB_DR_MODE_OTG) &&
935 DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
936 reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
937 else
938 reg &= ~DWC3_GCTL_DSBLCLKGTNG;
939 break;
940 case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
941 /*
942 * REVISIT Enabling this bit so that host-mode hibernation
943 * will work. Device-mode hibernation is not yet implemented.
944 */
945 reg |= DWC3_GCTL_GBLHIBERNATIONEN;
946 break;
947 default:
948 /* nothing */
949 break;
950 }
951
952 /*
953 * This is a workaround for STAR#4846132, which only affects
954 * DWC_usb31 version 2.00a operating in host mode.
955 *
956 * There is a problem in DWC_usb31 version 2.00a operating
957 * in host mode that causes a CSR read timeout when the CSR
958 * read coincides with RAM clock gating entry. Disable clock
959 * gating to avoid it, sacrificing power consumption for
960 * correct operation.
961 */
962 if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
963 hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
964 reg |= DWC3_GCTL_DSBLCLKGTNG;
965
966 /* check if current dwc3 is on simulation board */
967 if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
968 dev_info(dwc->dev, "Running with FPGA optimizations\n");
969 dwc->is_fpga = true;
970 }
971
972 WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
973 "disable_scramble cannot be used on non-FPGA builds\n");
974
975 if (dwc->disable_scramble_quirk && dwc->is_fpga)
976 reg |= DWC3_GCTL_DISSCRAMBLE;
977 else
978 reg &= ~DWC3_GCTL_DISSCRAMBLE;
979
980 if (dwc->u2exit_lfps_quirk)
981 reg |= DWC3_GCTL_U2EXIT_LFPS;
982
983 /*
984 * WORKAROUND: DWC3 revisions <1.90a have a bug
985 * where the device can fail to connect at SuperSpeed
986 * and falls back to high-speed mode which causes
987 * the device to enter a Connect/Disconnect loop
988 */
989 if (DWC3_VER_IS_PRIOR(DWC3, 190A))
990 reg |= DWC3_GCTL_U2RSTECN;
991
992 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
993 }
994
995 static int dwc3_core_get_phy(struct dwc3 *dwc);
996 static int dwc3_core_ulpi_init(struct dwc3 *dwc);
997
998 /* set global incr burst type configuration registers */
999 static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
1000 {
1001 struct device *dev = dwc->dev;
1002 /* incrx_mode : for INCR burst type. */
1003 bool incrx_mode;
1004 /* incrx_size : for size of INCRX burst. */
1005 u32 incrx_size;
1006 u32 *vals;
1007 u32 cfg;
1008 int ntype;
1009 int ret;
1010 int i;
1011
1012 cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
1013
1014 /*
1015 * Handle property "snps,incr-burst-type-adjustment".
1016 * Get the number of values in this property:
1017 * result <= 0 means this property is not supported.
1018 * result = 1 means INCRx burst mode is supported.
1019 * result > 1 means undefined length burst mode is supported.
1020 */
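/*
 * Illustrative DT snippet (example values only, not a requirement):
 * snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
 * More than one value enables undefined-length INCR bursts, and the
 * largest value (here 16) selects the INCRx size cap applied below.
 */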
1021 ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
1022 if (ntype <= 0)
1023 return;
1024
1025 vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
1026 if (!vals)
1027 return;
1028
1029 /* Get INCR burst type, and parse it */
1030 ret = device_property_read_u32_array(dev,
1031 "snps,incr-burst-type-adjustment", vals, ntype);
1032 if (ret) {
1033 kfree(vals);
1034 dev_err(dev, "Error to get property\n");
1035 return;
1036 }
1037
1038 incrx_size = *vals;
1039
1040 if (ntype > 1) {
1041 /* INCRX (undefined length) burst mode */
1042 incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
1043 for (i = 1; i < ntype; i++) {
1044 if (vals[i] > incrx_size)
1045 incrx_size = vals[i];
1046 }
1047 } else {
1048 /* INCRX burst mode */
1049 incrx_mode = INCRX_BURST_MODE;
1050 }
1051
1052 kfree(vals);
1053
1054 /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
1055 cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
1056 if (incrx_mode)
1057 cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
1058 switch (incrx_size) {
1059 case 256:
1060 cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
1061 break;
1062 case 128:
1063 cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
1064 break;
1065 case 64:
1066 cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
1067 break;
1068 case 32:
1069 cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
1070 break;
1071 case 16:
1072 cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
1073 break;
1074 case 8:
1075 cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
1076 break;
1077 case 4:
1078 cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
1079 break;
1080 case 1:
1081 break;
1082 default:
1083 dev_err(dev, "Invalid property\n");
1084 break;
1085 }
1086
1087 dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
1088 }
1089
1090 static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
1091 {
1092 u32 scale;
1093 u32 reg;
1094
1095 if (!dwc->susp_clk)
1096 return;
1097
1098 /*
1099 * The power down scale field specifies how many suspend_clk
1100 * periods fit into a 16KHz clock period. When performing
1101 * the division, round up the remainder.
1102 *
1103 * The power down scale value is calculated using the fastest
1104 * frequency of the suspend_clk. If it isn't fixed (but within
1105 * the accuracy requirement), the driver may not know the max
1106 * rate of the suspend_clk, so only update the power down scale
1107 * if the default is less than the calculated value from
1108 * clk_get_rate() or if the default is questionably high
1109 * (3x or more) to be within the requirement.
1110 */
1111 scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
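/*
 * Example (assuming a 32.768 kHz suspend_clk): DIV_ROUND_UP(32768, 16000)
 * = 3, i.e. 2.048 suspend_clk periods per 16 kHz period, rounded up.
 */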
1112 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1113 if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
1114 (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
1115 reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
1116 reg |= DWC3_GCTL_PWRDNSCALE(scale);
1117 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1118 }
1119 }
1120
1121 static void dwc3_config_threshold(struct dwc3 *dwc)
1122 {
1123 u32 reg;
1124 u8 rx_thr_num;
1125 u8 rx_maxburst;
1126 u8 tx_thr_num;
1127 u8 tx_maxburst;
1128
1129 /*
1130 * Both the number of packets and the max burst settings must be
1131 * configured to enable the RX and/or TX thresholds.
1132 */
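/*
 * The *_prd values below come from the "snps,rx-thr-num-pkt-prd",
 * "snps,rx-max-burst-prd", "snps,tx-thr-num-pkt-prd" and
 * "snps,tx-max-burst-prd" properties parsed in dwc3_get_properties().
 */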
1133 if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
1134 rx_thr_num = dwc->rx_thr_num_pkt_prd;
1135 rx_maxburst = dwc->rx_max_burst_prd;
1136 tx_thr_num = dwc->tx_thr_num_pkt_prd;
1137 tx_maxburst = dwc->tx_max_burst_prd;
1138
1139 if (rx_thr_num && rx_maxburst) {
1140 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1141 reg |= DWC31_RXTHRNUMPKTSEL_PRD;
1142
1143 reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
1144 reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
1145
1146 reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
1147 reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
1148
1149 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1150 }
1151
1152 if (tx_thr_num && tx_maxburst) {
1153 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1154 reg |= DWC31_TXTHRNUMPKTSEL_PRD;
1155
1156 reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
1157 reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
1158
1159 reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
1160 reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
1161
1162 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1163 }
1164 }
1165
1166 rx_thr_num = dwc->rx_thr_num_pkt;
1167 rx_maxburst = dwc->rx_max_burst;
1168 tx_thr_num = dwc->tx_thr_num_pkt;
1169 tx_maxburst = dwc->tx_max_burst;
1170
1171 if (DWC3_IP_IS(DWC3)) {
1172 if (rx_thr_num && rx_maxburst) {
1173 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1174 reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
1175
1176 reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
1177 reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1178
1179 reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1180 reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1181
1182 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1183 }
1184
1185 if (tx_thr_num && tx_maxburst) {
1186 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1187 reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
1188
1189 reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
1190 reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1191
1192 reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1193 reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1194
1195 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1196 }
1197 } else {
1198 if (rx_thr_num && rx_maxburst) {
1199 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1200 reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
1201
1202 reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
1203 reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1204
1205 reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1206 reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1207
1208 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1209 }
1210
1211 if (tx_thr_num && tx_maxburst) {
1212 reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1213 reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
1214
1215 reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
1216 reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1217
1218 reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1219 reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1220
1221 dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1222 }
1223 }
1224 }
1225
1226 /**
1227 * dwc3_core_init - Low-level initialization of DWC3 Core
1228 * @dwc: Pointer to our controller context structure
1229 *
1230 * Returns 0 on success otherwise negative errno.
1231 */
1232 static int dwc3_core_init(struct dwc3 *dwc)
1233 {
1234 unsigned int hw_mode;
1235 u32 reg;
1236 int ret;
1237
1238 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
1239
1240 /*
1241 * Write the Linux version code to our GUID register so it's easy to
1242 * figure out which kernel version a bug was found with.
1243 */
1244 dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
1245
1246 ret = dwc3_phy_setup(dwc);
1247 if (ret)
1248 return ret;
1249
1250 if (!dwc->ulpi_ready) {
1251 ret = dwc3_core_ulpi_init(dwc);
1252 if (ret) {
1253 if (ret == -ETIMEDOUT) {
1254 dwc3_core_soft_reset(dwc);
1255 ret = -EPROBE_DEFER;
1256 }
1257 return ret;
1258 }
1259 dwc->ulpi_ready = true;
1260 }
1261
1262 if (!dwc->phys_ready) {
1263 ret = dwc3_core_get_phy(dwc);
1264 if (ret)
1265 goto err_exit_ulpi;
1266 dwc->phys_ready = true;
1267 }
1268
1269 ret = dwc3_phy_init(dwc);
1270 if (ret)
1271 goto err_exit_ulpi;
1272
1273 ret = dwc3_core_soft_reset(dwc);
1274 if (ret)
1275 goto err_exit_phy;
1276
1277 dwc3_core_setup_global_control(dwc);
1278 dwc3_core_num_eps(dwc);
1279
1280 /* Set power down scale of suspend_clk */
1281 dwc3_set_power_down_clk_scale(dwc);
1282
1283 /* Adjust Frame Length */
1284 dwc3_frame_length_adjustment(dwc);
1285
1286 /* Adjust Reference Clock Period */
1287 dwc3_ref_clk_period(dwc);
1288
1289 dwc3_set_incr_burst_type(dwc);
1290
1291 ret = dwc3_phy_power_on(dwc);
1292 if (ret)
1293 goto err_exit_phy;
1294
1295 ret = dwc3_event_buffers_setup(dwc);
1296 if (ret) {
1297 dev_err(dwc->dev, "failed to setup event buffers\n");
1298 goto err_power_off_phy;
1299 }
1300
1301 /*
1302 * ENDXFER polling is available on version 3.10a and later of
1303 * the DWC_usb3 controller. It is NOT available in the
1304 * DWC_usb31 controller.
1305 */
1306 if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
1307 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1308 reg |= DWC3_GUCTL2_RST_ACTBITLATER;
1309 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1310 }
1311
1312 /*
1313 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
1314 * only. If the PM TIMER ECN is enabled through GUCTL2[19], the
1315 * link compliance test (TD7.21) may fail. If the ECN is not
1316 * enabled (GUCTL2[19] = 0), the controller will use the old timer
1317 * value (5us), which is still acceptable for the link compliance
1318 * test. Therefore, do not enable the PM TIMER ECN in 3.20a by
1319 * default; instead, leave GUCTL2[19] cleared (= 0).
1320 */
1321 if (DWC3_VER_IS(DWC3, 320A)) {
1322 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1323 reg &= ~DWC3_GUCTL2_LC_TIMER;
1324 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1325 }
1326
1327 /*
1328 * When configured in HOST mode, after issuing U3/L2 exit the controller
1329 * fails to send a proper CRC checksum in the CRC5 field. Because of this
1330 * behaviour a Transaction Error is generated, resulting in reset and
1331 * re-enumeration of the attached USB device. The termsel, xcvrsel and
1332 * opmode signals all become 0 at the end of resume. Enabling bit 10 of
1333 * GUCTL1 corrects this problem. This option is to support certain
1334 * legacy ULPI PHYs.
1335 */
1336 if (dwc->resume_hs_terminations) {
1337 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1338 reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
1339 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1340 }
1341
1342 if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
1343 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1344
1345 /*
1346 * Enable hardware control of sending remote wakeup
1347 * in HS when the device is in the L1 state.
1348 */
1349 if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
1350 reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
1351
1352 /*
1353 * Decouple USB 2.0 L1 & L2 events which will allow for
1354 * gadget driver to only receive U3/L2 suspend & wakeup
1355 * events and prevent the more frequent L1 LPM transitions
1356 * from interrupting the driver.
1357 */
1358 if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
1359 reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
1360
1361 if (dwc->dis_tx_ipgap_linecheck_quirk)
1362 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
1363
1364 if (dwc->parkmode_disable_ss_quirk)
1365 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
1366
1367 if (dwc->parkmode_disable_hs_quirk)
1368 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;
1369
1370 if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
1371 (dwc->maximum_speed == USB_SPEED_HIGH ||
1372 dwc->maximum_speed == USB_SPEED_FULL))
1373 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
1374
1375 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1376 }
1377
1378 dwc3_config_threshold(dwc);
1379
1380 return 0;
1381
1382 err_power_off_phy:
1383 dwc3_phy_power_off(dwc);
1384 err_exit_phy:
1385 dwc3_phy_exit(dwc);
1386 err_exit_ulpi:
1387 dwc3_ulpi_exit(dwc);
1388
1389 return ret;
1390 }
1391
1392 static int dwc3_core_get_phy(struct dwc3 *dwc)
1393 {
1394 struct device *dev = dwc->dev;
1395 struct device_node *node = dev->of_node;
1396 int ret;
1397
1398 if (node) {
1399 dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
1400 dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
1401 } else {
1402 dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
1403 dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
1404 }
1405
1406 if (IS_ERR(dwc->usb2_phy)) {
1407 ret = PTR_ERR(dwc->usb2_phy);
1408 if (ret == -ENXIO || ret == -ENODEV)
1409 dwc->usb2_phy = NULL;
1410 else
1411 return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1412 }
1413
1414 if (IS_ERR(dwc->usb3_phy)) {
1415 ret = PTR_ERR(dwc->usb3_phy);
1416 if (ret == -ENXIO || ret == -ENODEV)
1417 dwc->usb3_phy = NULL;
1418 else
1419 return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1420 }
1421
1422 dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
1423 if (IS_ERR(dwc->usb2_generic_phy)) {
1424 ret = PTR_ERR(dwc->usb2_generic_phy);
1425 if (ret == -ENOSYS || ret == -ENODEV)
1426 dwc->usb2_generic_phy = NULL;
1427 else
1428 return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1429 }
1430
1431 dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
1432 if (IS_ERR(dwc->usb3_generic_phy)) {
1433 ret = PTR_ERR(dwc->usb3_generic_phy);
1434 if (ret == -ENOSYS || ret == -ENODEV)
1435 dwc->usb3_generic_phy = NULL;
1436 else
1437 return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1438 }
1439
1440 return 0;
1441 }
1442
1443 static int dwc3_core_init_mode(struct dwc3 *dwc)
1444 {
1445 struct device *dev = dwc->dev;
1446 int ret;
1447
1448 switch (dwc->dr_mode) {
1449 case USB_DR_MODE_PERIPHERAL:
1450 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);
1451
1452 if (dwc->usb2_phy)
1453 otg_set_vbus(dwc->usb2_phy->otg, false);
1454 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
1455 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
1456
1457 ret = dwc3_gadget_init(dwc);
1458 if (ret)
1459 return dev_err_probe(dev, ret, "failed to initialize gadget\n");
1460 break;
1461 case USB_DR_MODE_HOST:
1462 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);
1463
1464 if (dwc->usb2_phy)
1465 otg_set_vbus(dwc->usb2_phy->otg, true);
1466 phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
1467 phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
1468
1469 ret = dwc3_host_init(dwc);
1470 if (ret)
1471 return dev_err_probe(dev, ret, "failed to initialize host\n");
1472 break;
1473 case USB_DR_MODE_OTG:
1474 INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
1475 ret = dwc3_drd_init(dwc);
1476 if (ret)
1477 return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
1478 break;
1479 default:
1480 dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
1481 return -EINVAL;
1482 }
1483
1484 return 0;
1485 }
1486
1487 static void dwc3_core_exit_mode(struct dwc3 *dwc)
1488 {
1489 switch (dwc->dr_mode) {
1490 case USB_DR_MODE_PERIPHERAL:
1491 dwc3_gadget_exit(dwc);
1492 break;
1493 case USB_DR_MODE_HOST:
1494 dwc3_host_exit(dwc);
1495 break;
1496 case USB_DR_MODE_OTG:
1497 dwc3_drd_exit(dwc);
1498 break;
1499 default:
1500 /* do nothing */
1501 break;
1502 }
1503
1504 /* de-assert DRVVBUS for HOST and OTG mode */
1505 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
1506 }
1507
1508 static void dwc3_get_properties(struct dwc3 *dwc)
1509 {
1510 struct device *dev = dwc->dev;
1511 u8 lpm_nyet_threshold;
1512 u8 tx_de_emphasis;
1513 u8 hird_threshold;
1514 u8 rx_thr_num_pkt = 0;
1515 u8 rx_max_burst = 0;
1516 u8 tx_thr_num_pkt = 0;
1517 u8 tx_max_burst = 0;
1518 u8 rx_thr_num_pkt_prd = 0;
1519 u8 rx_max_burst_prd = 0;
1520 u8 tx_thr_num_pkt_prd = 0;
1521 u8 tx_max_burst_prd = 0;
1522 u8 tx_fifo_resize_max_num;
1523
1524 /* default to highest possible threshold */
1525 lpm_nyet_threshold = 0xf;
1526
1527 /* default to -3.5dB de-emphasis */
1528 tx_de_emphasis = 1;
1529
1530 /*
1531 * default to assert utmi_sleep_n and use maximum allowed HIRD
1532 * threshold value of 0b1100
1533 */
1534 hird_threshold = 12;
1535
1536 /*
1537 * default to a TXFIFO size large enough to fit 6 max packets. This
1538 * allows for systems with larger bus latencies to have some headroom
1539 * for endpoints that have a large bMaxBurst value.
1540 */
1541 tx_fifo_resize_max_num = 6;
1542
1543 dwc->maximum_speed = usb_get_maximum_speed(dev);
1544 dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
1545 dwc->dr_mode = usb_get_dr_mode(dev);
1546 dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
1547
1548 dwc->sysdev_is_parent = device_property_read_bool(dev,
1549 "linux,sysdev_is_parent");
1550 if (dwc->sysdev_is_parent)
1551 dwc->sysdev = dwc->dev->parent;
1552 else
1553 dwc->sysdev = dwc->dev;
1554
1555 dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
1556
1557 dwc->has_lpm_erratum = device_property_read_bool(dev,
1558 "snps,has-lpm-erratum");
1559 device_property_read_u8(dev, "snps,lpm-nyet-threshold",
1560 &lpm_nyet_threshold);
1561 dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
1562 "snps,is-utmi-l1-suspend");
1563 device_property_read_u8(dev, "snps,hird-threshold",
1564 &hird_threshold);
1565 dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
1566 "snps,dis-start-transfer-quirk");
1567 dwc->usb3_lpm_capable = device_property_read_bool(dev,
1568 "snps,usb3_lpm_capable");
1569 dwc->usb2_lpm_disable = device_property_read_bool(dev,
1570 "snps,usb2-lpm-disable");
1571 dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
1572 "snps,usb2-gadget-lpm-disable");
1573 device_property_read_u8(dev, "snps,rx-thr-num-pkt",
1574 &rx_thr_num_pkt);
1575 device_property_read_u8(dev, "snps,rx-max-burst",
1576 &rx_max_burst);
1577 device_property_read_u8(dev, "snps,tx-thr-num-pkt",
1578 &tx_thr_num_pkt);
1579 device_property_read_u8(dev, "snps,tx-max-burst",
1580 &tx_max_burst);
1581 device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
1582 &rx_thr_num_pkt_prd);
1583 device_property_read_u8(dev, "snps,rx-max-burst-prd",
1584 &rx_max_burst_prd);
1585 device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
1586 &tx_thr_num_pkt_prd);
1587 device_property_read_u8(dev, "snps,tx-max-burst-prd",
1588 &tx_max_burst_prd);
1589 dwc->do_fifo_resize = device_property_read_bool(dev,
1590 "tx-fifo-resize");
1591 if (dwc->do_fifo_resize)
1592 device_property_read_u8(dev, "tx-fifo-max-num",
1593 &tx_fifo_resize_max_num);
1594
1595 dwc->disable_scramble_quirk = device_property_read_bool(dev,
1596 "snps,disable_scramble_quirk");
1597 dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
1598 "snps,u2exit_lfps_quirk");
1599 dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
1600 "snps,u2ss_inp3_quirk");
1601 dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
1602 "snps,req_p1p2p3_quirk");
1603 dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
1604 "snps,del_p1p2p3_quirk");
1605 dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
1606 "snps,del_phy_power_chg_quirk");
1607 dwc->lfps_filter_quirk = device_property_read_bool(dev,
1608 "snps,lfps_filter_quirk");
1609 dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
1610 "snps,rx_detect_poll_quirk");
1611 dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
1612 "snps,dis_u3_susphy_quirk");
1613 dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
1614 "snps,dis_u2_susphy_quirk");
1615 dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
1616 "snps,dis_enblslpm_quirk");
1617 dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
1618 "snps,dis-u1-entry-quirk");
1619 dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
1620 "snps,dis-u2-entry-quirk");
1621 dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
1622 "snps,dis_rxdet_inp3_quirk");
1623 dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
1624 "snps,dis-u2-freeclk-exists-quirk");
1625 dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
1626 "snps,dis-del-phy-power-chg-quirk");
1627 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
1628 "snps,dis-tx-ipgap-linecheck-quirk");
1629 dwc->resume_hs_terminations = device_property_read_bool(dev,
1630 "snps,resume-hs-terminations");
1631 dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
1632 "snps,ulpi-ext-vbus-drv");
1633 dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
1634 "snps,parkmode-disable-ss-quirk");
1635 dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
1636 "snps,parkmode-disable-hs-quirk");
1637 dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
1638 "snps,gfladj-refclk-lpm-sel-quirk");
1639
1640 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
1641 "snps,tx_de_emphasis_quirk");
1642 device_property_read_u8(dev, "snps,tx_de_emphasis",
1643 &tx_de_emphasis);
1644 device_property_read_string(dev, "snps,hsphy_interface",
1645 &dwc->hsphy_interface);
1646 device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
1647 &dwc->fladj);
1648 device_property_read_u32(dev, "snps,ref-clock-period-ns",
1649 &dwc->ref_clk_per);
1650
1651 dwc->dis_metastability_quirk = device_property_read_bool(dev,
1652 "snps,dis_metastability_quirk");
1653
1654 dwc->dis_split_quirk = device_property_read_bool(dev,
1655 "snps,dis-split-quirk");
1656
1657 dwc->lpm_nyet_threshold = lpm_nyet_threshold;
1658 dwc->tx_de_emphasis = tx_de_emphasis;
1659
1660 dwc->hird_threshold = hird_threshold;
1661
1662 dwc->rx_thr_num_pkt = rx_thr_num_pkt;
1663 dwc->rx_max_burst = rx_max_burst;
1664
1665 dwc->tx_thr_num_pkt = tx_thr_num_pkt;
1666 dwc->tx_max_burst = tx_max_burst;
1667
1668 dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
1669 dwc->rx_max_burst_prd = rx_max_burst_prd;
1670
1671 dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
1672 dwc->tx_max_burst_prd = tx_max_burst_prd;
1673
1674 dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
1675 }
1676
1677 /* check whether the core supports IMOD */
1678 bool dwc3_has_imod(struct dwc3 *dwc)
1679 {
1680 return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
1681 DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
1682 DWC3_IP_IS(DWC32);
1683 }
1684
1685 static void dwc3_check_params(struct dwc3 *dwc)
1686 {
1687 struct device *dev = dwc->dev;
1688 unsigned int hwparam_gen =
1689 DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
1690
1691 /*
1692 * Enable IMOD for all supporting controllers.
1693 *
1694 * Particularly, DWC_usb3 v3.00a must enable this feature for
1695 * the following reason:
1696 *
1697 * Workaround for STAR 9000961433 which affects only version
1698 * 3.00a of the DWC_usb3 core. This prevents the controller
1699 * interrupt from being masked while handling events. IMOD
1700 * allows us to work around this issue. Enable it for the
1701 * affected version.
1702 */
1703 if (dwc3_has_imod(dwc))
1704 dwc->imod_interval = 1;
1705
1706 /* Check the maximum_speed parameter */
1707 switch (dwc->maximum_speed) {
1708 case USB_SPEED_FULL:
1709 case USB_SPEED_HIGH:
1710 break;
1711 case USB_SPEED_SUPER:
1712 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
1713 dev_warn(dev, "UDC doesn't support Gen 1\n");
1714 break;
1715 case USB_SPEED_SUPER_PLUS:
1716 if ((DWC3_IP_IS(DWC32) &&
1717 hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
1718 (!DWC3_IP_IS(DWC32) &&
1719 hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
1720 dev_warn(dev, "UDC doesn't support SSP\n");
1721 break;
1722 default:
1723 dev_err(dev, "invalid maximum_speed parameter %d\n",
1724 dwc->maximum_speed);
1725 fallthrough;
1726 case USB_SPEED_UNKNOWN:
1727 switch (hwparam_gen) {
1728 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1729 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1730 break;
1731 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1732 if (DWC3_IP_IS(DWC32))
1733 dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1734 else
1735 dwc->maximum_speed = USB_SPEED_SUPER;
1736 break;
1737 case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
1738 dwc->maximum_speed = USB_SPEED_HIGH;
1739 break;
1740 default:
1741 dwc->maximum_speed = USB_SPEED_SUPER;
1742 break;
1743 }
1744 break;
1745 }
1746
1747 /*
1748 * Currently the driver does not have visibility into the HW
1749 * parameters to determine the maximum number of lanes the HW supports.
1750 * If the number of lanes is not specified in the device property, then
1751 * set the default to support dual-lane for DWC_usb32 and single-lane
1752 * for DWC_usb31 for super-speed-plus.
1753 */
1754 if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
1755 switch (dwc->max_ssp_rate) {
1756 case USB_SSP_GEN_2x1:
1757 if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
1758 dev_warn(dev, "UDC only supports Gen 1\n");
1759 break;
1760 case USB_SSP_GEN_1x2:
1761 case USB_SSP_GEN_2x2:
1762 if (DWC3_IP_IS(DWC31))
1763 dev_warn(dev, "UDC only supports single lane\n");
1764 break;
1765 case USB_SSP_GEN_UNKNOWN:
1766 default:
1767 switch (hwparam_gen) {
1768 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1769 if (DWC3_IP_IS(DWC32))
1770 dwc->max_ssp_rate = USB_SSP_GEN_2x2;
1771 else
1772 dwc->max_ssp_rate = USB_SSP_GEN_2x1;
1773 break;
1774 case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1775 if (DWC3_IP_IS(DWC32))
1776 dwc->max_ssp_rate = USB_SSP_GEN_1x2;
1777 break;
1778 }
1779 break;
1780 }
1781 }
1782 }
1783
1784 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
1785 {
1786 struct device *dev = dwc->dev;
1787 struct device_node *np_phy;
1788 struct extcon_dev *edev = NULL;
1789 const char *name;
1790
1791 if (device_property_read_bool(dev, "extcon"))
1792 return extcon_get_edev_by_phandle(dev, 0);
1793
1794 /*
1795 * Device tree platforms should get extcon via phandle.
1796 * On ACPI platforms, we get the name from a device property.
1797 * This device property is for kernel internal use only and
1798 * is expected to be set by the glue code.
1799 */
1800 if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
1801 return extcon_get_extcon_dev(name);
1802
1803 /*
1804 * Check explicitly if "usb-role-switch" is used since
1805 * extcon_find_edev_by_node() cannot be used to check the absence of
1806 * an extcon device. In the absence of a device it will always return
1807 * EPROBE_DEFER.
1808 */
1809 if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
1810 device_property_read_bool(dev, "usb-role-switch"))
1811 return NULL;
1812
1813 /*
1814 * Try to get an extcon device from the USB PHY controller's "port"
1815 * node. Check if it has the "port" node first, to avoid printing the
1816 * error message from underlying code, as it's a valid case: extcon
1817 * device (and "port" node) may be missing in case of "usb-role-switch"
1818 * or OTG mode.
1819 */
1820 np_phy = of_parse_phandle(dev->of_node, "phys", 0);
1821 if (of_graph_is_present(np_phy)) {
1822 struct device_node *np_conn;
1823
1824 np_conn = of_graph_get_remote_node(np_phy, -1, -1);
1825 if (np_conn)
1826 edev = extcon_find_edev_by_node(np_conn);
1827 of_node_put(np_conn);
1828 }
1829 of_node_put(np_phy);
1830
1831 return edev;
1832 }
1833
1834 static int dwc3_get_clocks(struct dwc3 *dwc)
1835 {
1836 struct device *dev = dwc->dev;
1837
1838 if (!dev->of_node)
1839 return 0;
1840
1841 /*
1842 	 * Clocks are optional, but new DT platforms should support all clocks
1843 	 * as required by the DT-binding.
1844 	 * Some devices use different clock names in legacy device trees;
1845 	 * check for them to retain backwards compatibility.
1846 */
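	/*
	 * Illustrative example only (not taken from a real DT): a node
	 * following the current binding would typically list
	 *
	 *	clock-names = "ref", "bus_early", "suspend";
	 *
	 * whereas a legacy device tree may use "ref_clk", "bus_clk" and
	 * "suspend_clk" instead.
	 */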
1847 dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
1848 if (IS_ERR(dwc->bus_clk)) {
1849 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
1850 "could not get bus clock\n");
1851 }
1852
1853 if (dwc->bus_clk == NULL) {
1854 dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
1855 if (IS_ERR(dwc->bus_clk)) {
1856 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
1857 "could not get bus clock\n");
1858 }
1859 }
1860
1861 dwc->ref_clk = devm_clk_get_optional(dev, "ref");
1862 if (IS_ERR(dwc->ref_clk)) {
1863 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
1864 "could not get ref clock\n");
1865 }
1866
1867 if (dwc->ref_clk == NULL) {
1868 dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
1869 if (IS_ERR(dwc->ref_clk)) {
1870 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
1871 "could not get ref clock\n");
1872 }
1873 }
1874
1875 dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
1876 if (IS_ERR(dwc->susp_clk)) {
1877 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
1878 "could not get suspend clock\n");
1879 }
1880
1881 if (dwc->susp_clk == NULL) {
1882 dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
1883 if (IS_ERR(dwc->susp_clk)) {
1884 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
1885 "could not get suspend clock\n");
1886 }
1887 }
1888
1889 return 0;
1890 }
1891
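/**
 * dwc3_get_usb_power_supply - look up the USB charger power supply
 * @dwc: pointer to our context structure
 *
 * Returns NULL when the "usb-psy-name" property is absent, the named
 * power supply once it is registered, or ERR_PTR(-EPROBE_DEFER) while it
 * is not available yet.
 */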
1892 static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
1893 {
1894 struct power_supply *usb_psy;
1895 const char *usb_psy_name;
1896 int ret;
1897
1898 ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
1899 if (ret < 0)
1900 return NULL;
1901
1902 usb_psy = power_supply_get_by_name(usb_psy_name);
1903 if (!usb_psy)
1904 return ERR_PTR(-EPROBE_DEFER);
1905
1906 return usb_psy;
1907 }
1908
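/*
 * Probe maps the global registers (excluding the xHCI region), reads the
 * device properties, acquires the reset, clock and power-supply resources,
 * validates and caches the hardware parameters, sets up runtime PM and the
 * event buffers, and finally initializes the core for the selected dr_mode.
 */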
1909 static int dwc3_probe(struct platform_device *pdev)
1910 {
1911 struct device *dev = &pdev->dev;
1912 struct resource *res, dwc_res;
1913 void __iomem *regs;
1914 struct dwc3 *dwc;
1915 int ret;
1916
1917 dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
1918 if (!dwc)
1919 return -ENOMEM;
1920
1921 dwc->dev = dev;
1922
1923 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1924 if (!res) {
1925 dev_err(dev, "missing memory resource\n");
1926 return -ENODEV;
1927 }
1928
1929 dwc->xhci_resources[0].start = res->start;
1930 dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
1931 DWC3_XHCI_REGS_END;
1932 dwc->xhci_resources[0].flags = res->flags;
1933 dwc->xhci_resources[0].name = res->name;
1934
1935 /*
1936 	 * Request the memory region but exclude the xHCI regs, since that
1937 	 * region will be requested by the xhci-plat driver.
1938 */
1939 dwc_res = *res;
1940 dwc_res.start += DWC3_GLOBALS_REGS_START;
1941
1942 if (dev->of_node) {
1943 struct device_node *parent = of_get_parent(dev->of_node);
1944
1945 if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
1946 dwc_res.start -= DWC3_GLOBALS_REGS_START;
1947 dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
1948 }
1949
1950 of_node_put(parent);
1951 }
1952
1953 regs = devm_ioremap_resource(dev, &dwc_res);
1954 if (IS_ERR(regs))
1955 return PTR_ERR(regs);
1956
1957 dwc->regs = regs;
1958 dwc->regs_size = resource_size(&dwc_res);
1959
1960 dwc3_get_properties(dwc);
1961
1962 dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
1963 if (IS_ERR(dwc->usb_psy))
1964 return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
1965
1966 dwc->reset = devm_reset_control_array_get_optional_shared(dev);
1967 if (IS_ERR(dwc->reset)) {
1968 ret = PTR_ERR(dwc->reset);
1969 goto err_put_psy;
1970 }
1971
1972 ret = dwc3_get_clocks(dwc);
1973 if (ret)
1974 goto err_put_psy;
1975
1976 ret = reset_control_deassert(dwc->reset);
1977 if (ret)
1978 goto err_put_psy;
1979
1980 ret = dwc3_clk_enable(dwc);
1981 if (ret)
1982 goto err_assert_reset;
1983
1984 if (!dwc3_core_is_valid(dwc)) {
1985 dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
1986 ret = -ENODEV;
1987 goto err_disable_clks;
1988 }
1989
1990 platform_set_drvdata(pdev, dwc);
1991 dwc3_cache_hwparams(dwc);
1992
1993 if (!dwc->sysdev_is_parent &&
1994 DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
1995 ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
1996 if (ret)
1997 goto err_disable_clks;
1998 }
1999
2000 spin_lock_init(&dwc->lock);
2001 mutex_init(&dwc->mutex);
2002
2003 pm_runtime_get_noresume(dev);
2004 pm_runtime_set_active(dev);
2005 pm_runtime_use_autosuspend(dev);
2006 pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
2007 pm_runtime_enable(dev);
2008
2009 pm_runtime_forbid(dev);
2010
2011 ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
2012 if (ret) {
2013 dev_err(dwc->dev, "failed to allocate event buffers\n");
2014 ret = -ENOMEM;
2015 goto err_allow_rpm;
2016 }
2017
2018 dwc->edev = dwc3_get_extcon(dwc);
2019 if (IS_ERR(dwc->edev)) {
2020 ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
2021 goto err_free_event_buffers;
2022 }
2023
2024 ret = dwc3_get_dr_mode(dwc);
2025 if (ret)
2026 goto err_free_event_buffers;
2027
2028 ret = dwc3_core_init(dwc);
2029 if (ret) {
2030 dev_err_probe(dev, ret, "failed to initialize core\n");
2031 goto err_free_event_buffers;
2032 }
2033
2034 dwc3_check_params(dwc);
2035 dwc3_debugfs_init(dwc);
2036
2037 ret = dwc3_core_init_mode(dwc);
2038 if (ret)
2039 goto err_exit_debugfs;
2040
2041 pm_runtime_put(dev);
2042
2043 dma_set_max_seg_size(dev, UINT_MAX);
2044
2045 return 0;
2046
2047 err_exit_debugfs:
2048 dwc3_debugfs_exit(dwc);
2049 dwc3_event_buffers_cleanup(dwc);
2050 dwc3_phy_power_off(dwc);
2051 dwc3_phy_exit(dwc);
2052 dwc3_ulpi_exit(dwc);
2053 err_free_event_buffers:
2054 dwc3_free_event_buffers(dwc);
2055 err_allow_rpm:
2056 pm_runtime_allow(dev);
2057 pm_runtime_disable(dev);
2058 pm_runtime_dont_use_autosuspend(dev);
2059 pm_runtime_set_suspended(dev);
2060 pm_runtime_put_noidle(dev);
2061 err_disable_clks:
2062 dwc3_clk_disable(dwc);
2063 err_assert_reset:
2064 reset_control_assert(dwc->reset);
2065 err_put_psy:
2066 if (dwc->usb_psy)
2067 power_supply_put(dwc->usb_psy);
2068
2069 return ret;
2070 }
2071
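/*
 * Tear down in roughly reverse probe order: exit the active mode and
 * debugfs, shut the core and ULPI down, undo the runtime PM setup done in
 * probe, then release the event buffers and the power supply reference.
 */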
2072 static void dwc3_remove(struct platform_device *pdev)
2073 {
2074 struct dwc3 *dwc = platform_get_drvdata(pdev);
2075
2076 pm_runtime_get_sync(&pdev->dev);
2077
2078 dwc3_core_exit_mode(dwc);
2079 dwc3_debugfs_exit(dwc);
2080
2081 dwc3_core_exit(dwc);
2082 dwc3_ulpi_exit(dwc);
2083
2084 pm_runtime_allow(&pdev->dev);
2085 pm_runtime_disable(&pdev->dev);
2086 pm_runtime_dont_use_autosuspend(&pdev->dev);
2087 pm_runtime_put_noidle(&pdev->dev);
2088 /*
2089 * HACK: Clear the driver data, which is currently accessed by parent
2090 * glue drivers, before allowing the parent to suspend.
2091 */
2092 platform_set_drvdata(pdev, NULL);
2093 pm_runtime_set_suspended(&pdev->dev);
2094
2095 dwc3_free_event_buffers(dwc);
2096
2097 if (dwc->usb_psy)
2098 power_supply_put(dwc->usb_psy);
2099 }
2100
2101 #ifdef CONFIG_PM
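/*
 * Bring the hardware back up after dwc3_core_exit(): deassert the resets,
 * re-enable the clocks and re-run the core initialization, unwinding on
 * failure.
 */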
2102 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
2103 {
2104 int ret;
2105
2106 ret = reset_control_deassert(dwc->reset);
2107 if (ret)
2108 return ret;
2109
2110 ret = dwc3_clk_enable(dwc);
2111 if (ret)
2112 goto assert_reset;
2113
2114 ret = dwc3_core_init(dwc);
2115 if (ret)
2116 goto disable_clks;
2117
2118 return 0;
2119
2120 disable_clks:
2121 dwc3_clk_disable(dwc);
2122 assert_reset:
2123 reset_control_assert(dwc->reset);
2124
2125 return ret;
2126 }
2127
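/*
 * Common system/runtime suspend handler. SUSPHY is kept enabled across
 * system suspend when needed (see the TI AM62 note below), then the core
 * is quiesced according to the current role: the device role is suspended
 * and the core exited, the host role either exits the core or only drops
 * the PHY runtime PM references, and the OTG role is torn down on system
 * suspend only.
 */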
2128 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
2129 {
2130 u32 reg;
2131
2132 if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
2133 dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
2134 DWC3_GUSB2PHYCFG_SUSPHY) ||
2135 (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
2136 DWC3_GUSB3PIPECTL_SUSPHY);
2137 /*
2138 * TI AM62 platform requires SUSPHY to be
2139 * enabled for system suspend to work.
2140 */
2141 if (!dwc->susphy_state)
2142 dwc3_enable_susphy(dwc, true);
2143 }
2144
2145 switch (dwc->current_dr_role) {
2146 case DWC3_GCTL_PRTCAP_DEVICE:
2147 if (pm_runtime_suspended(dwc->dev))
2148 break;
2149 dwc3_gadget_suspend(dwc);
2150 synchronize_irq(dwc->irq_gadget);
2151 dwc3_core_exit(dwc);
2152 break;
2153 case DWC3_GCTL_PRTCAP_HOST:
2154 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2155 dwc3_core_exit(dwc);
2156 break;
2157 }
2158
2159 		/* Let the controller suspend the HSPHY before the PHY driver suspends */
2160 if (dwc->dis_u2_susphy_quirk ||
2161 dwc->dis_enblslpm_quirk) {
2162 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2163 reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
2164 DWC3_GUSB2PHYCFG_SUSPHY;
2165 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2166
2167 /* Give some time for USB2 PHY to suspend */
2168 usleep_range(5000, 6000);
2169 }
2170
2171 phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
2172 phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
2173 break;
2174 case DWC3_GCTL_PRTCAP_OTG:
2175 /* do nothing during runtime_suspend */
2176 if (PMSG_IS_AUTO(msg))
2177 break;
2178
2179 if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2180 dwc3_gadget_suspend(dwc);
2181 synchronize_irq(dwc->irq_gadget);
2182 }
2183
2184 dwc3_otg_exit(dwc);
2185 dwc3_core_exit(dwc);
2186 break;
2187 default:
2188 /* do nothing */
2189 break;
2190 }
2191
2192 return 0;
2193 }
2194
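/*
 * Common system/runtime resume handler: re-initialize the core as needed
 * for the current role, restore the GUSB2PHYCFG bits changed during
 * suspend for the host role, and put SUSPHY back to its pre-suspend state
 * on system resume.
 */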
2195 static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
2196 {
2197 int ret;
2198 u32 reg;
2199
2200 switch (dwc->current_dr_role) {
2201 case DWC3_GCTL_PRTCAP_DEVICE:
2202 ret = dwc3_core_init_for_resume(dwc);
2203 if (ret)
2204 return ret;
2205
2206 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
2207 dwc3_gadget_resume(dwc);
2208 break;
2209 case DWC3_GCTL_PRTCAP_HOST:
2210 if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2211 ret = dwc3_core_init_for_resume(dwc);
2212 if (ret)
2213 return ret;
2214 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
2215 break;
2216 }
2217 /* Restore GUSB2PHYCFG bits that were modified in suspend */
2218 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2219 if (dwc->dis_u2_susphy_quirk)
2220 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2221
2222 if (dwc->dis_enblslpm_quirk)
2223 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
2224
2225 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2226
2227 phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
2228 phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
2229 break;
2230 case DWC3_GCTL_PRTCAP_OTG:
2231 /* nothing to do on runtime_resume */
2232 if (PMSG_IS_AUTO(msg))
2233 break;
2234
2235 ret = dwc3_core_init_for_resume(dwc);
2236 if (ret)
2237 return ret;
2238
2239 dwc3_set_prtcap(dwc, dwc->current_dr_role, true);
2240
2241 dwc3_otg_init(dwc);
2242 if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
2243 dwc3_otg_host_init(dwc);
2244 } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2245 dwc3_gadget_resume(dwc);
2246 }
2247
2248 break;
2249 default:
2250 /* do nothing */
2251 break;
2252 }
2253
2254 if (!PMSG_IS_AUTO(msg)) {
2255 		/* Restore the SUSPHY state to what it was before system suspend. */
2256 dwc3_enable_susphy(dwc, dwc->susphy_state);
2257 }
2258
2259 return 0;
2260 }
2261
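/*
 * Return -EBUSY when runtime suspend must be blocked, i.e. while the
 * device role still has an active connection.
 */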
2262 static int dwc3_runtime_checks(struct dwc3 *dwc)
2263 {
2264 switch (dwc->current_dr_role) {
2265 case DWC3_GCTL_PRTCAP_DEVICE:
2266 if (dwc->connected)
2267 return -EBUSY;
2268 break;
2269 case DWC3_GCTL_PRTCAP_HOST:
2270 default:
2271 /* do nothing */
2272 break;
2273 }
2274
2275 return 0;
2276 }
2277
2278 static int dwc3_runtime_suspend(struct device *dev)
2279 {
2280 struct dwc3 *dwc = dev_get_drvdata(dev);
2281 int ret;
2282
2283 if (dwc3_runtime_checks(dwc))
2284 return -EBUSY;
2285
2286 ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
2287 if (ret)
2288 return ret;
2289
2290 return 0;
2291 }
2292
2293 static int dwc3_runtime_resume(struct device *dev)
2294 {
2295 struct dwc3 *dwc = dev_get_drvdata(dev);
2296 int ret;
2297
2298 ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
2299 if (ret)
2300 return ret;
2301
2302 switch (dwc->current_dr_role) {
2303 case DWC3_GCTL_PRTCAP_DEVICE:
2304 if (dwc->pending_events) {
2305 pm_runtime_put(dwc->dev);
2306 dwc->pending_events = false;
2307 enable_irq(dwc->irq_gadget);
2308 }
2309 break;
2310 case DWC3_GCTL_PRTCAP_HOST:
2311 default:
2312 /* do nothing */
2313 break;
2314 }
2315
2316 pm_runtime_mark_last_busy(dev);
2317
2318 return 0;
2319 }
2320
2321 static int dwc3_runtime_idle(struct device *dev)
2322 {
2323 struct dwc3 *dwc = dev_get_drvdata(dev);
2324
2325 switch (dwc->current_dr_role) {
2326 case DWC3_GCTL_PRTCAP_DEVICE:
2327 if (dwc3_runtime_checks(dwc))
2328 return -EBUSY;
2329 break;
2330 case DWC3_GCTL_PRTCAP_HOST:
2331 default:
2332 /* do nothing */
2333 break;
2334 }
2335
2336 pm_runtime_mark_last_busy(dev);
2337 pm_runtime_autosuspend(dev);
2338
2339 return 0;
2340 }
2341 #endif /* CONFIG_PM */
2342
2343 #ifdef CONFIG_PM_SLEEP
2344 static int dwc3_suspend(struct device *dev)
2345 {
2346 struct dwc3 *dwc = dev_get_drvdata(dev);
2347 int ret;
2348
2349 ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
2350 if (ret)
2351 return ret;
2352
2353 pinctrl_pm_select_sleep_state(dev);
2354
2355 return 0;
2356 }
2357
2358 static int dwc3_resume(struct device *dev)
2359 {
2360 struct dwc3 *dwc = dev_get_drvdata(dev);
2361 int ret;
2362
2363 pinctrl_pm_select_default_state(dev);
2364
2365 ret = dwc3_resume_common(dwc, PMSG_RESUME);
2366 if (ret)
2367 return ret;
2368
2369 pm_runtime_disable(dev);
2370 pm_runtime_set_active(dev);
2371 pm_runtime_enable(dev);
2372
2373 return 0;
2374 }
2375
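/*
 * On platforms using the dis_split_quirk, re-set GUCTL3.SPLITDISABLE once
 * system resume has completed while the controller is in host mode.
 */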
2376 static void dwc3_complete(struct device *dev)
2377 {
2378 struct dwc3 *dwc = dev_get_drvdata(dev);
2379 u32 reg;
2380
2381 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
2382 dwc->dis_split_quirk) {
2383 reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
2384 reg |= DWC3_GUCTL3_SPLITDISABLE;
2385 dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
2386 }
2387 }
2388 #else
2389 #define dwc3_complete NULL
2390 #endif /* CONFIG_PM_SLEEP */
2391
2392 static const struct dev_pm_ops dwc3_dev_pm_ops = {
2393 SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
2394 .complete = dwc3_complete,
2395
2396 /*
2397 * Runtime suspend halts the controller on disconnection. It relies on
2398 * platforms with custom connection notification to start the controller
2399 * again.
2400 */
2401 SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
2402 dwc3_runtime_idle)
2403 };
2404
2405 #ifdef CONFIG_OF
2406 static const struct of_device_id of_dwc3_match[] = {
2407 {
2408 .compatible = "snps,dwc3"
2409 },
2410 {
2411 .compatible = "synopsys,dwc3"
2412 },
2413 { },
2414 };
2415 MODULE_DEVICE_TABLE(of, of_dwc3_match);
2416 #endif
2417
2418 #ifdef CONFIG_ACPI
2419
2420 #define ACPI_ID_INTEL_BSW "808622B7"
2421
2422 static const struct acpi_device_id dwc3_acpi_match[] = {
2423 { ACPI_ID_INTEL_BSW, 0 },
2424 { },
2425 };
2426 MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
2427 #endif
2428
2429 static struct platform_driver dwc3_driver = {
2430 .probe = dwc3_probe,
2431 .remove_new = dwc3_remove,
2432 .driver = {
2433 .name = "dwc3",
2434 .of_match_table = of_match_ptr(of_dwc3_match),
2435 .acpi_match_table = ACPI_PTR(dwc3_acpi_match),
2436 .pm = &dwc3_dev_pm_ops,
2437 },
2438 };
2439
2440 module_platform_driver(dwc3_driver);
2441
2442 MODULE_ALIAS("platform:dwc3");
2443 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
2444 MODULE_LICENSE("GPL v2");
2445 MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
2446