// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg && ring->first_seg->trbs) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

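/*
 * Build the string descriptor table referenced by the DbC Info Context.
 * The returned value packs the byte length of each descriptor into one
 * u32, one length per byte, in the order the shifts below produce:
 *
 *	bits 31:24	serial string length
 *	bits 23:16	product string length
 *	bits 15:8	manufacturer string length
 *	bits 7:0	string0 (LANGID) length, always 4
 *
 * For example, a hypothetical DBC_STRING_SERIAL of "0001" would yield a
 * (4 + 1) * 2 = 10 byte descriptor, so 10 would land in bits 31:24. The
 * real strings are defined in xhci-dbgcap.h.
 */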
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
	string_length += 4;

	return string_length;
}

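/*
 * Program the DbC Info Context, the two bulk endpoint contexts, and the
 * device-descriptor registers. The register layout used below matches
 * how the sysfs store handlers later repack the same fields:
 *
 *	DEVINFO1: bits 31:16 = idVendor,  bits 7:0  = bInterfaceProtocol
 *	DEVINFO2: bits 31:16 = bcdDevice, bits 15:0 = idProduct
 */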
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}

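/*
 * Complete a request and hand it back to its owner. The dbc->lock is
 * dropped around the ->complete() callback (hence the __releases and
 * __acquires annotations) so the completion handler may requeue without
 * deadlocking; callers must therefore hold dbc->lock on entry.
 */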
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

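/*
 * Turn a TRB into a no-op in place. Only the cycle bit of the control
 * word is preserved, so hardware ownership of the TRB is unchanged.
 */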
static void trb_to_noop(union xhci_trb *trb)
{
	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request *req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

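/*
 * Write one TRB at the current enqueue position and advance. DbC rings
 * are a single self-linked segment, so when the advance lands on the
 * link TRB we toggle its cycle bit, wrap enqueue back to the start of
 * the segment, and flip the producer cycle state.
 */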
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

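/*
 * Queue a single normal TRB for a bulk transfer and ring the doorbell.
 * The TRB is first written with the cycle bit set to the value the
 * hardware currently considers "not mine"; only after a write barrier
 * is the cycle bit flipped to hand the TRB over, so the controller can
 * never see a half-written TRB.
 */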
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

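/*
 * Map the request buffer for DMA and put it on the hardware ring.
 * Caller must hold dbc->lock; dbc_ep_queue() below is the locked
 * wrapper around this helper.
 */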
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

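/*
 * The DbC event ring uses a single-entry Event Ring Segment Table: one
 * segment of TRBS_PER_SEGMENT entries is all the hardware is told
 * about, which keeps dequeue handling in inc_evt_deq() trivial.
 */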
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

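/*
 * Allocate a one-segment ring. For transfer rings the last TRB of the
 * segment is a link TRB whose segment pointer refers back to the
 * segment's own base and carries LINK_TOGGLE, so the ring wraps in
 * place and the cycle state flips on every pass. The event ring is
 * consumed by software only, so it needs no link TRB.
 */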
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

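/*
 * Allocate and wire up everything the controller needs before enable:
 * rings, ERST, contexts, and the string table, then point the DbC
 * registers at them. Called with dbc->lock held from
 * xhci_do_dbc_start(), hence the GFP_ATOMIC allocations.
 */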
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

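/*
 * Bring the controller from DS_DISABLED to DS_ENABLED. The control
 * register is first cleared and polled until the hardware reports the
 * enable bit low, memory is set up, and only then are the enable and
 * port-enable bits set and polled high. Called under dbc->lock.
 */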
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		spin_lock(&dbc->lock);
		xhci_dbc_flush_requests(dbc);
		spin_unlock(&dbc->lock);

		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(dbc);
		pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
	}
}

static void
handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
{
	if (halted) {
		dev_info(dbc->dev, "DbC Endpoint halted\n");
		dep->halted = 1;

	} else if (dep->halted) {
		dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
		dep->halted = 0;

		if (!list_empty(&dep->list_pending))
			writel(DBC_DOOR_BELL_TARGET(dep->direction),
			       &dbc->regs->doorbell);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

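/*
 * Handle a transfer event: find the pending request that owns the TRB
 * the event points at, translate the completion code, and give the
 * request back. Stall events need special care, see the comment in the
 * COMP_STALL_ERROR case below.
 */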
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	struct xhci_ep_ctx *ep_ctx;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx = (ep_id == EPID_OUT) ?
			dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring = dep->ring;

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when the host
		 * sends a ClearFeature(ENDPOINT_HALT) request even if there
		 * wasn't an active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will later
		 * start processing TRBs starting from this 'STALLED' TRB,
		 * causing TRBs and requests to be out of sync.
		 *
		 * If the STALL event shows some bytes were transferred then
		 * assume it's an actual transfer issue and give back the
		 * request. In this case mark the TRB as No-Op to keep hw from
		 * using the TRB again.
		 */

		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

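/*
 * Poll-based event handling, driven by the event_work delayed work.
 * First advance the DbC state machine from the port and control
 * registers (enabled -> connected -> configured, and back on unplug or
 * reset), then drain the event ring, consuming TRBs whose cycle bit
 * matches the current consumer cycle state, and finally write back the
 * new dequeue pointer. The return value tells the caller which driver
 * callback, if any, to invoke outside the lock.
 */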
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char *p;
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;
	struct xhci_dbc *dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(dbc);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static ssize_t dbc_idVendor_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%04x\n", dbc->idVendor);
}

static ssize_t dbc_idVendor_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;

	if (kstrtou16(buf, 0, &value))
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_idProduct_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%04x\n", dbc->idProduct);
}

static ssize_t dbc_idProduct_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;

	if (kstrtou16(buf, 0, &value))
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu)) | value;
	writel(dev_info, ptr);
	return size;
}

static ssize_t dbc_bcdDevice_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%04x\n", dbc->bcdDevice);
}

static ssize_t dbc_bcdDevice_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;

	if (kstrtou16(buf, 0, &value))
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%02x\n", dbc->bInterfaceProtocol);
}

static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u8 value;
	int ret;

	/* bInterfaceProtocol is 8 bit, but xhci only supports values 0 and 1 */
	ret = kstrtou8(buf, 0, &value);
	if (ret || value > 1)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bInterfaceProtocol = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffu)) | value;
	writel(dev_info, ptr);

	return size;
}

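/*
 * sysfs interface. The attributes live on the host controller device,
 * and the descriptor fields are only writable while the DbC is
 * disabled. A typical session might look like the following, assuming
 * a PCI xHCI controller at a hypothetical <pci-dev> path:
 *
 *	echo 0x1d6b > /sys/bus/pci/devices/<pci-dev>/dbc_idVendor
 *	echo enable > /sys/bus/pci/devices/<pci-dev>/dbc
 *	cat /sys/bus/pci/devices/<pci-dev>/dbc
 */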
static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);

static struct attribute *dbc_dev_attributes[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	NULL
};

static const struct attribute_group dbc_dev_attrib_grp = {
	.attrs = dbc_dev_attributes,
};

struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
	struct xhci_dbc *dbc;
	int ret;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return NULL;

	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;
	dbc->idProduct = DBC_PRODUCT_ID;
	dbc->idVendor = DBC_VENDOR_ID;
	dbc->bcdDevice = DBC_DEVICE_REV;
	dbc->bInterfaceProtocol = DBC_PROTOCOL;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = sysfs_create_group(&dev->kobj, &dbc_dev_attrib_grp);
	if (ret)
		goto err;

	return dbc;
err:
	kfree(dbc);
	return NULL;
}

/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;
	/* stop hw, stop wq and call dbc->ops->stop() */
	xhci_dbc_stop(dbc);

	/* remove sysfs files */
	sysfs_remove_group(&dbc->dev->kobj, &dbc_dev_attrib_grp);

	kfree(dbc);
}

int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	struct device *dev;
	void __iomem *base;
	int ret;
	int dbc_cap_offs;

	/* create all parameters needed resembling a dbc device */
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	/* already allocated and in use */
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}

void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

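/*
 * On system suspend the DbC is simply stopped; if it was up and
 * configured, remember that in resume_required so resume can bring it
 * all the way back.
 */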
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */

int xhci_dbc_init(void)
{
	return dbc_tty_init();
}

void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}