// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg && ring->first_seg->trbs) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

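/*
 * Write the UTF-16LE serial, product and manufacturer string descriptors,
 * plus string descriptor 0 (language ID 0x0409, US English), into the
 * preallocated string table.  The returned value packs the length of each
 * descriptor into one byte per string; it ends up in the DbC Info Context
 * length field filled in by xhci_dbc_init_contexts() below.
 */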
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
	string_length += 4;

	return string_length;
}

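/*
 * Program the DbC Context per xHCI spec section 7.6.9: an Info Context
 * holding the string descriptor addresses and lengths, followed by the
 * bulk-out and bulk-in endpoint contexts.  Then point the hardware at the
 * context via the DCCP register and fill in the device descriptor fields
 * that DbC reports to the host (the devinfo1/devinfo2 registers).
 */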
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}

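/*
 * Complete a request back to its owner.  Called with dbc->lock held; the
 * lock is dropped around the ->complete() callback (hence the sparse
 * __releases/__acquires annotations) so the callback is free to queue
 * further requests.
 */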
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

static void trb_to_noop(union xhci_trb *trb)
{
	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

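/*
 * Request lifecycle -- a sketch of the typical caller flow, as used by the
 * DbC TTY glue in xhci-dbgtty.c (my_complete_cb and the surrounding names
 * are illustrative, not part of this file):
 *
 *	req = dbc_alloc_request(dbc, BULK_IN, GFP_KERNEL);
 *	req->buf = buffer;
 *	req->length = len;
 *	req->complete = my_complete_cb;	// invoked with req->status set
 *	ret = dbc_ep_queue(req);
 *	...
 *	dbc_free_request(req);		// once the request is done with
 */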
struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request *req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

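/*
 * Copy one TRB into the ring at the current enqueue slot and advance the
 * enqueue pointer.  DbC rings are single-segment, so when the next slot is
 * the link TRB we flip its cycle bit, wrap back to the start of the segment
 * and toggle the ring's cycle state.
 */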
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

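/*
 * Queue a single Normal TRB for a bulk transfer.  The TRB is first written
 * with the cycle bit in the state that keeps it invisible to the hardware;
 * only after a write barrier is the cycle bit flipped to hand ownership to
 * the controller, and the doorbell rung.
 */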
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/* 'control' is CPU-endian here; xhci_dbc_queue_trb() byte-swaps it */
	if (cycle)
		control &= ~TRB_CYCLE;
	else
		control |= TRB_CYCLE;

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

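/*
 * Map the request buffer for DMA and put it on the hardware ring.  On
 * success the request stays on dep->list_pending (and the buffer belongs
 * to the device) until the transfer event gives it back.
 */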
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

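/*
 * DbC uses a minimal Event Ring Segment Table: one coherently allocated
 * entry describing the single event ring segment.
 */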
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xHCI spec 7.6.9: all three contexts (info, ep-out, ep-in) are 64 bytes each */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

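/*
 * Allocate and wire up everything DbC needs before it can be enabled:
 * event and transfer rings, the ERST, the DbC context, and the string
 * table, then program the ERST registers and the contexts.  Unwinds in
 * reverse order on any allocation failure.
 */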
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(dbc);
		pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
	}
}

static void
handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
{
	if (halted) {
		dev_info(dbc->dev, "DbC Endpoint halted\n");
		dep->halted = 1;

	} else if (dep->halted) {
		dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
		dep->halted = 0;

		if (!list_empty(&dep->list_pending))
			writel(DBC_DOOR_BELL_TARGET(dep->direction),
			       &dbc->regs->doorbell);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

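/*
 * Handle a Transfer Event: match it to a pending request by comparing the
 * event's TRB pointer against req->trb_dma, then give the request back with
 * a status derived from the completion code.  Stall events need special
 * care; see the COMP_STALL_ERROR case below.
 */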
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	struct xhci_ep_ctx *ep_ctx;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx = (ep_id == EPID_OUT) ?
			dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring = dep->ring;

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when the host
		 * sends a ClearFeature(ENDPOINT_HALT) request even if there
		 * wasn't an active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will
		 * later start processing TRBs starting from this 'STALLED'
		 * TRB, causing TRBs and requests to be out of sync.
		 *
		 * If the STALL event shows some bytes were transferred then
		 * assume it's an actual transfer issue and give back the
		 * request.  In this case mark the TRB as No-Op to prevent
		 * the hardware from using the TRB again.
		 */

		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

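/*
 * Poll-based event handling, called with dbc->lock held.  First run the DbC
 * state machine on the PORTSC/CTRL registers, then drain the event ring.
 * The return value tells the worker what to do next: EVT_GSER means the
 * host configured us (call ->configure()), EVT_DISC means we lost the host
 * (call ->disconnect()), EVT_DONE keeps polling, and EVT_ERR stops the
 * worker.
 */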
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

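/*
 * sysfs interface.  The 'dbc' attribute reports the current state and
 * accepts "enable"/"disable" to start or stop DbC; the descriptor
 * attributes below may only be changed while DbC is disabled.  A typical
 * session (the PCI address is just an example):
 *
 *	# echo enable > /sys/bus/pci/devices/0000:00:14.0/dbc
 *	# cat /sys/bus/pci/devices/0000:00:14.0/dbc
 *	connected
 */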
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char *p;
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;
	struct xhci_dbc *dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(dbc);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static ssize_t dbc_idVendor_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%04x\n", dbc->idVendor);
}

static ssize_t dbc_idVendor_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;

	if (kstrtou16(buf, 0, &value))
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_idProduct_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%04x\n", dbc->idProduct);
}

static ssize_t dbc_idProduct_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;

	if (kstrtou16(buf, 0, &value))
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu)) | value;
	writel(dev_info, ptr);
	return size;
}

static ssize_t dbc_bcdDevice_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%04x\n", dbc->bcdDevice);
}

static ssize_t dbc_bcdDevice_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;

	if (kstrtou16(buf, 0, &value))
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sprintf(buf, "%02x\n", dbc->bInterfaceProtocol);
}

static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u8 value;
	int ret;

	/* bInterfaceProtocol is 8 bit, but xhci only supports values 0 and 1 */
	ret = kstrtou8(buf, 0, &value);
	if (ret || value > 1)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bInterfaceProtocol = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffu)) | value;
	writel(dev_info, ptr);

	return size;
}

static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);

static struct attribute *dbc_dev_attributes[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	NULL
};

static const struct attribute_group dbc_dev_attrib_grp = {
	.attrs = dbc_dev_attributes,
};

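/*
 * Allocate a struct xhci_dbc, bind it to the DbC register block at 'base',
 * seed the default descriptor values, and register the sysfs attributes.
 * Fails if DbC is already enabled (e.g. claimed by firmware or a previous
 * user).
 */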
struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
	struct xhci_dbc *dbc;
	int ret;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return NULL;

	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;
	dbc->idProduct = DBC_PRODUCT_ID;
	dbc->idVendor = DBC_VENDOR_ID;
	dbc->bcdDevice = DBC_DEVICE_REV;
	dbc->bInterfaceProtocol = DBC_PROTOCOL;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = sysfs_create_group(&dev->kobj, &dbc_dev_attrib_grp);
	if (ret)
		goto err;

	return dbc;
err:
	kfree(dbc);
	return NULL;
}

/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;
	/* stop hw, stop wq and call dbc->ops->stop() */
	xhci_dbc_stop(dbc);

	/* remove sysfs files */
	sysfs_remove_group(&dbc->dev->kobj, &dbc_dev_attrib_grp);

	kfree(dbc);
}

int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	struct device *dev;
	void __iomem *base;
	int ret;
	int dbc_cap_offs;

	/* gather the parameters needed to set up a DbC device */
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	/* already allocated and in use */
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}

void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */

int xhci_dbc_init(void)
{
	return dbc_tty_init();
}

void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}