// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static inline void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
				   size, dma_handle, flags);
	if (!vaddr)
		return NULL;

	memset(vaddr, 0, size);
	return vaddr;
}

static inline void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
		      void *cpu_addr, dma_addr_t dma_handle)
{
	if (cpu_addr)
		dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
				  size, cpu_addr, dma_handle);
}

static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0 descriptor (LANGID 0x0409, US English): */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
	string_length += 4;

	return string_length;
}
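/*
 * Note on the value returned by xhci_dbc_populate_strings(): the DbC Info
 * Context keeps all four string descriptor lengths packed into one 32-bit
 * field, one byte per descriptor. The shifts above build that field as
 * (serial << 24) | (product << 16) | (manufacturer << 8) | string0, and
 * xhci_dbc_init_contexts() below writes it verbatim into info->length.
 */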
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
	struct xhci_dbc *dbc;
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	dbc = xhci->dbc;
	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() performs the endian conversion; pass CPU-endian values. */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep *dep = req->dep;
	struct xhci_dbc *dbc = dep->dbc;
	struct xhci_hcd *xhci = dbc->xhci;
	struct device *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(xhci, req);
	spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb *trb = req->trb;

	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
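/*
 * TRB ownership is handed to the controller via the cycle bit:
 * xhci_dbc_queue_bulk_tx() below first writes the TRB with the cycle bit
 * set to the inverse of the ring's current cycle state, issues a write
 * barrier, and only then flips the bit, so the DbC hardware can never see
 * a half-written TRB as valid.
 */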
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = dep->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/*
	 * "control" is CPU-endian here; xhci_dbc_queue_trb() does the
	 * cpu_to_le32() conversion when it writes the TRB.
	 */
	if (cycle)
		control &= ~TRB_CYCLE;
	else
		control |= TRB_CYCLE;

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int ret;
	struct device *dev;
	struct xhci_dbc *dbc = dep->dbc;
	struct xhci_hcd *xhci = dbc->xhci;

	dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		xhci_err(xhci, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		xhci_err(xhci, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long flags;
	struct xhci_dbc *dbc = dep->dbc;
	int ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}
static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
	struct dbc_ep *dep;
	struct xhci_dbc *dbc = xhci->dbc;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
	xhci_dbc_do_eps_init(xhci, BULK_OUT);
	xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct xhci_dbc *dbc = xhci->dbc;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dbc_dma_alloc_coherent(xhci,
					     dbc->string_size,
					     &dbc->string_dma,
					     flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);
	xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	xhci_write_64(xhci, deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(xhci, string_length);

	mmiowb();

	xhci_dbc_eps_init(xhci);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	xhci_free_erst(xhci, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(xhci);

	if (dbc->string) {
		dbc_dma_free_coherent(xhci,
				      dbc->string_size,
				      dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;

	xhci_free_erst(xhci, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}
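/*
 * Enable sequence, as implemented below: clear the control register and
 * wait for the DbC enable bit to read back as zero, set up DbC memory
 * while the controller is idle, then set the enable bits and wait for
 * the hardware to latch DbC enable before declaring the state ENABLED.
 */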
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int ret;
	u32 ctrl;
	struct xhci_dbc *dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (dbc->state == DS_DISABLED)
		return;

	writel(0, &dbc->regs->control);
	xhci_dbc_mem_cleanup(xhci);
	dbc->state = DS_DISABLED;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int ret;
	unsigned long flags;
	struct xhci_dbc *dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	unsigned long flags;
	struct xhci_dbc *dbc = xhci->dbc;
	struct dbc_port *port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(xhci);

	spin_lock_irqsave(&dbc->lock, flags);
	xhci_do_dbc_stop(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}

static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
	u32 portsc;
	struct xhci_dbc *dbc = xhci->dbc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		xhci_info(xhci, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		xhci_info(xhci, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		xhci_info(xhci, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		xhci_info(xhci, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ? get_out_ep(xhci) : get_in_ep(xhci);
	ring = dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		/* FALLTHROUGH */
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		xhci_warn(xhci, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		xhci_err(xhci, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request by the DMA address of its TRB: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
			req = r;
			break;
		}
	}

	if (!req) {
		xhci_warn(xhci, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
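/*
 * DbC state machine: xhci_do_dbc_start() moves DS_DISABLED to DS_ENABLED;
 * the event handler below advances DS_ENABLED -> DS_CONNECTED when a host
 * attaches, DS_CONNECTED -> DS_CONFIGURED once the DbC run bit is set, and
 * falls back to DS_ENABLED on cable unplug or port reset. DS_STALLED is
 * entered while either bulk transfer ring is halted and left when both
 * halt bits have cleared and the run bit is set again.
 */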
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	struct dbc_ep *dep;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	struct xhci_hcd *xhci = dbc->xhci;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			xhci_info(xhci, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			xhci_info(xhci, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			xhci_info(xhci, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			xhci_info(xhci, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			xhci_info(xhci, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(xhci, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(xhci, evt);
			break;
		default:
			break;
		}

		inc_deq(xhci, dbc->ring_evt);
		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		xhci_write_64(xhci, deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
	int ret;
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	struct xhci_hcd *xhci;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	xhci = dbc->xhci;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(xhci);
		if (ret) {
			xhci_err(xhci, "failed to alloc tty device\n");
			break;
		}

		xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(xhci);
		break;
	case EVT_DONE:
		break;
	default:
		xhci_info(xhci, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32 reg;
	struct xhci_dbc *dbc;
	unsigned long flags;
	void __iomem *base;
	int dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* The xhci driver will not use DbC if it is already in use. */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}
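/*
 * The "dbc" sysfs attribute created below lives on the host controller
 * device and accepts "enable"/"disable" writes, e.g. (path shown for a
 * typical PCI xHCI controller; the PCI address is only an example):
 *
 *   echo enable > /sys/bus/pci/devices/0000:00:14.0/dbc
 *   cat /sys/bus/pci/devices/0000:00:14.0/dbc
 */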
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char *p;
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */