// SPDX-License-Identifier: GPL-2.0
/**
 * xhci-dbc.c - xHCI debug capability early driver
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define	xdbc_trace	trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */

static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}

static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
	void *virt;

	virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!virt)
		return NULL;

	if (dma_addr)
		*dma_addr = (dma_addr_t)__pa(virt);

	return virt;
}

static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
	u32 bus, dev, func, class;

	for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
		for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
			for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

				class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
				if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
					continue;

				if (xdbc_num-- != 0)
					continue;

				*b = bus;
				*d = dev;
				*f = func;

				return 0;
			}
		}
	}

	return -1;
}

static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
	u32 result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay);
		wait -= delay;
	} while (wait > 0);

	return -ETIMEDOUT;
}

static void __init xdbc_bios_handoff(void)
{
	int offset, timeout;
	u32 val;

	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
	val = readl(xdbc.xhci_base + offset);

	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
		timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

		if (timeout) {
			pr_notice("failed to hand over xHCI control from BIOS\n");
			writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
		}
	}

	/* Disable BIOS SMIs and clear all SMI events: */
	val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
	val &= XHCI_LEGACY_DISABLE_SMI;
	val |= XHCI_LEGACY_SMI_EVENTS;
	writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}

static int __init
xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
	seg->trbs = xdbc_get_page(&seg->dma);
	if (!seg->trbs)
		return -ENOMEM;

	ring->segment = seg;

	return 0;
}

static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;

	if (!seg)
		return;

	memblock_free(seg->dma, PAGE_SIZE);
	ring->segment = NULL;
}

static void xdbc_reset_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;
	struct xdbc_trb *link_trb;

	memset(seg->trbs, 0, PAGE_SIZE);

	ring->enqueue = seg->trbs;
	ring->dequeue = seg->trbs;
	ring->cycle_state = 1;

	if (ring != &xdbc.evt_ring) {
		link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
		link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
		link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
		link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
	}
}

static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		s[i] = cpu_to_le16(c[i]);
}

static void xdbc_mem_init(void)
{
	struct xdbc_ep_context *ep_in, *ep_out;
	struct usb_string_descriptor *s_desc;
	struct xdbc_erst_entry *entry;
	struct xdbc_strings *strings;
	struct xdbc_context *ctx;
	unsigned int max_burst;
	u32 string_length;
	int index = 0;
	u32 dev_info;

	xdbc_reset_ring(&xdbc.evt_ring);
	xdbc_reset_ring(&xdbc.in_ring);
	xdbc_reset_ring(&xdbc.out_ring);
	memset(xdbc.table_base, 0, PAGE_SIZE);
	memset(xdbc.out_buf, 0, PAGE_SIZE);

	/* Initialize event ring segment table: */
	xdbc.erst_size = 16;
	xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_ERST_ENTRY_NUM;
	entry = (struct xdbc_erst_entry *)xdbc.erst_base;

	entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
	entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
	entry->__reserved_0 = 0;

	/* Initialize ERST registers: */
	writel(1, &xdbc.xdbc_reg->ersts);
	xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
	xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

	/* Debug capability contexts: */
	xdbc.dbcc_size = 64 * 3;
	xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_DBCC_ENTRY_NUM;

	/* Populate the strings: */
	xdbc.string_size = sizeof(struct xdbc_strings);
	xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
	strings = (struct xdbc_strings *)xdbc.string_base;

	index += XDBC_STRING_ENTRY_NUM;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;

	string_length += 4;

	/* Populate info Context: */
	ctx = (struct xdbc_context *)xdbc.dbcc_base;

	ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
	ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
	ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
	ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
	ctx->info.length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
	ep_out = (struct xdbc_ep_context *)&ctx->out;

	ep_out->ep_info1 = 0;
	ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

	/* Populate bulk in endpoint context: */
	ep_in = (struct xdbc_ep_context *)&ctx->in;

	ep_in->ep_info1 = 0;
	ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

	/* Set DbC context and info registers: */
	xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

	dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
	writel(dev_info, &xdbc.xdbc_reg->devinfo1);

	dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
	writel(dev_info, &xdbc.xdbc_reg->devinfo2);

	xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
	xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}

static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
	void __iomem *ops_reg;
	void __iomem *portsc;
	u32 val, cap_length;
	int i;

	cap_length = readl(xdbc.xhci_base) & 0xff;
	ops_reg = xdbc.xhci_base + cap_length;

	id--;
	for (i = id; i < (id + count); i++) {
		portsc = ops_reg + 0x400 + i * 0x10;
		val = readl(portsc);
		if (!(val & PORT_CONNECT))
			writel(val | PORT_RESET, portsc);
	}
}

static void xdbc_reset_debug_port(void)
{
	u32 val, port_offset, port_count;
	int offset = 0;

	do {
		offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(xdbc.xhci_base + offset);
		if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
			continue;

		val = readl(xdbc.xhci_base + offset + 8);
		port_offset = XHCI_EXT_PORT_OFF(val);
		port_count = XHCI_EXT_PORT_COUNT(val);

		xdbc_do_reset_debug_port(port_offset, port_count);
	} while (1);
}

static void
xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xdbc_trb *trb, *link_trb;

	trb = ring->enqueue;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	++(ring->enqueue);
	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
		link_trb = ring->enqueue;
		if (ring->cycle_state)
			link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
		else
			link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

		ring->enqueue = ring->segment->trbs;
		ring->cycle_state ^= 1;
	}
}

static void xdbc_ring_doorbell(int target)
{
	writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}

static int xdbc_start(void)
{
	u32 ctrl, status;
	int ret;

	ctrl = readl(&xdbc.xdbc_reg->control);
	writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
	if (ret) {
		xdbc_trace("failed to initialize hardware\n");
		return ret;
	}

	/* Reset port to avoid bus hang: */
	if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
		xdbc_reset_debug_port();

	/* Wait for port connection: */
	ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for connection timed out\n");
		return ret;
	}

	/* Wait for debug device to be configured: */
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for device configuration timed out\n");
		return ret;
	}

	/* Check port number: */
	status = readl(&xdbc.xdbc_reg->status);
	if (!DCST_DEBUG_PORT(status)) {
		xdbc_trace("invalid root hub port number\n");
		return -ENODEV;
	}

	xdbc.port_number = DCST_DEBUG_PORT(status);

	xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
		   readl(&xdbc.xdbc_reg->control), xdbc.port_number);

	return 0;
}

static int xdbc_bulk_transfer(void *data, int size, bool read)
{
	struct xdbc_ring *ring;
	struct xdbc_trb *trb;
	u32 length, control;
	u32 cycle;
	u64 addr;

	if (size > XDBC_MAX_PACKET) {
		xdbc_trace("bad parameter, size %d\n", size);
		return -EINVAL;
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
	    !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
	    (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
	    (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

		xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
		return -EIO;
	}

	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(size);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	if (read) {
		memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
		addr = xdbc.in_dma;
		xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
	} else {
		memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
		memcpy(xdbc.out_buf, data, size);
		addr = xdbc.out_dma;
		xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
	}

	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();
	if (cycle)
		trb->field[3] |= cpu_to_le32(cycle);
	else
		trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);

	return size;
}

static int xdbc_handle_external_reset(void)
{
	int ret = 0;

	xdbc.flags = 0;
	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
	if (ret)
		goto reset_out;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0)
		goto reset_out;

	xdbc_trace("dbc recovered\n");

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;

reset_out:
	xdbc_trace("failed to recover from external reset\n");
	return ret;
}

static int __init xdbc_early_setup(void)
{
	int ret;

	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
	if (ret)
		return ret;

	/* Allocate the table page: */
	xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
	if (!xdbc.table_base)
		return -ENOMEM;

	/* Get and store the transfer buffer: */
	xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
	if (!xdbc.out_buf)
		return -ENOMEM;

	/* Allocate the event ring: */
	ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
	if (ret < 0)
		return ret;

	/* Allocate IN/OUT endpoint transfer rings: */
	ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
	if (ret < 0)
		return ret;

	ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
	if (ret < 0)
		return ret;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0) {
		writel(0, &xdbc.xdbc_reg->control);
		return ret;
	}

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;
}

int __init early_xdbc_parse_parameter(char *s)
{
	unsigned long dbgp_num = 0;
	u32 bus, dev, func, offset;
	int ret;

	if (!early_pci_allowed())
		return -EPERM;

	if (strstr(s, "keep"))
		early_console_keep = true;

	if (xdbc.xdbc_reg)
		return 0;

	if (*s && kstrtoul(s, 0, &dbgp_num))
		dbgp_num = 0;

	pr_notice("dbgp_num: %lu\n", dbgp_num);

	/* Locate the host controller: */
	ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
	if (ret) {
		pr_notice("failed to locate xhci host\n");
		return -ENODEV;
	}

	xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
	xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
	xdbc.bus = bus;
	xdbc.dev = dev;
	xdbc.func = func;

	/* Map the IO memory: */
	xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
	if (!xdbc.xhci_base)
		return -EINVAL;

	/* Locate DbC registers: */
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!offset) {
		pr_notice("xhci host doesn't support debug capability\n");
		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
		xdbc.xhci_base = NULL;
		xdbc.xhci_length = 0;

		return -ENODEV;
	}
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

	return 0;
}

int __init early_xdbc_setup_hardware(void)
{
	int ret;

	if (!xdbc.xdbc_reg)
		return -ENODEV;

	xdbc_bios_handoff();

	raw_spin_lock_init(&xdbc.lock);

	ret = xdbc_early_setup();
	if (ret) {
		pr_notice("failed to setup the connection to host\n");

		xdbc_free_ring(&xdbc.evt_ring);
		xdbc_free_ring(&xdbc.out_ring);
		xdbc_free_ring(&xdbc.in_ring);

		if (xdbc.table_dma)
			memblock_free(xdbc.table_dma, PAGE_SIZE);

		if (xdbc.out_dma)
			memblock_free(xdbc.out_dma, PAGE_SIZE);

		xdbc.table_base = NULL;
		xdbc.out_buf = NULL;
	}

	return ret;
}

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
	u32 port_reg;

	port_reg = readl(&xdbc.xdbc_reg->portsc);
	if (port_reg & PORTSC_CONN_CHANGE) {
		xdbc_trace("connect status change event\n");

		/* Check whether cable unplugged: */
		if (!(port_reg & PORTSC_CONN_STATUS)) {
			xdbc.flags = 0;
			xdbc_trace("cable unplugged\n");
		}
	}

	if (port_reg & PORTSC_RESET_CHANGE)
		xdbc_trace("port reset change event\n");

	if (port_reg & PORTSC_LINK_CHANGE)
		xdbc_trace("port link status change event\n");

	if (port_reg & PORTSC_CONFIG_CHANGE)
		xdbc_trace("config error change\n");

	/* Write back the value to clear RW1C bits: */
	writel(port_reg, &xdbc.xdbc_reg->portsc);
}

static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
	u32 comp_code;
	int ep_id;

	comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

	switch (comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
	default:
		if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
		if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
			xdbc.flags |= XDBC_FLAGS_IN_STALL;

		xdbc_trace("endpoint %d stalled\n", ep_id);
		break;
	}

	if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
	} else {
		xdbc_trace("invalid endpoint id %d\n", ep_id);
	}
}

static void xdbc_handle_events(void)
{
	struct xdbc_trb *evt_trb;
	bool update_erdp = false;
	u32 reg;
	u8 cmd;

	cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
		write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return;

	/* Handle external reset events: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (!(reg & CTRL_DBC_ENABLE)) {
		if (xdbc_handle_external_reset()) {
			xdbc_trace("failed to recover connection\n");
			return;
		}
	}

	/* Handle configure-exit event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_DBC_RUN_CHANGE) {
		writel(reg, &xdbc.xdbc_reg->control);
		if (reg & CTRL_DBC_RUN)
			xdbc.flags |= XDBC_FLAGS_CONFIGURED;
		else
			xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
	}

	/* Handle endpoint stall event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_HALT_IN_TR) {
		xdbc.flags |= XDBC_FLAGS_IN_STALL;
	} else {
		xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
		if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
			xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	}

	if (reg & CTRL_HALT_OUT_TR)
		xdbc.flags |= XDBC_FLAGS_OUT_STALL;
	else
		xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

	/* Handle the events in the event ring: */
	evt_trb = xdbc.evt_ring.dequeue;
	while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
		case TRB_TYPE(TRB_PORT_STATUS):
			xdbc_handle_port_status(evt_trb);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			xdbc_handle_tx_event(evt_trb);
			break;
		default:
			break;
		}

		++(xdbc.evt_ring.dequeue);
		if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
			xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
			xdbc.evt_ring.cycle_state ^= 1;
		}

		evt_trb = xdbc.evt_ring.dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp)
		xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}

static int xdbc_bulk_write(const char *bytes, int size)
{
	int ret, timeout = 0;
	unsigned long flags;

retry:
	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
			return -EAGAIN;
	} else {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
	}

	xdbc_handle_events();

	/* Check completion of the previous request: */
	if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		udelay(100);
		timeout += 100;
		goto retry;
	}

	if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		xdbc_trace("previous transfer not completed yet\n");

		return -ETIMEDOUT;
	}

	ret = xdbc_bulk_transfer((void *)bytes, size, false);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	return ret;
}

static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
	static char buf[XDBC_MAX_PACKET];
	int chunk, ret;
	int use_cr = 0;

	if (!xdbc.xdbc_reg)
		return;
	memset(buf, 0, XDBC_MAX_PACKET);
	while (n > 0) {
		for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {

			if (!use_cr && *str == '\n') {
				use_cr = 1;
				buf[chunk] = '\r';
				str--;
				n++;
				continue;
			}

			if (use_cr)
				use_cr = 0;
			buf[chunk] = *str;
		}

		if (chunk > 0) {
			ret = xdbc_bulk_write(buf, chunk);
			if (ret < 0)
				xdbc_trace("missed message {%s}\n", buf);
		}
	}
}

static struct console early_xdbc_console = {
	.name		= "earlyxdbc",
	.write		= early_xdbc_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

void __init early_xdbc_register_console(void)
{
	if (early_console)
		return;

	early_console = &early_xdbc_console;
	if (early_console_keep)
		early_console->flags &= ~CON_BOOT;
	else
		early_console->flags |= CON_BOOT;
	register_console(early_console);
}

static void xdbc_unregister_console(void)
{
	if (early_xdbc_console.flags & CON_ENABLED)
		unregister_console(&early_xdbc_console);
}

static int xdbc_scrub_function(void *ptr)
{
	unsigned long flags;

	while (true) {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
		xdbc_handle_events();

		if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
			raw_spin_unlock_irqrestore(&xdbc.lock, flags);
			break;
		}

		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		schedule_timeout_interruptible(1);
	}

	xdbc_unregister_console();
	writel(0, &xdbc.xdbc_reg->control);
	xdbc_trace("dbc scrub function exits\n");

	return 0;
}

static int __init xdbc_init(void)
{
	unsigned long flags;
	void __iomem *base;
	int ret = 0;
	u32 offset;

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return 0;

	/*
	 * It's time to shut down the DbC, so that the debug
	 * port can be reused by the host controller:
	 */
	if (early_xdbc_console.index == -1 ||
	    (early_xdbc_console.flags & CON_BOOT)) {
		xdbc_trace("hardware not used anymore\n");
		goto free_and_quit;
	}

	base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
	if (!base) {
		xdbc_trace("failed to remap the io address\n");
		ret = -ENOMEM;
		goto free_and_quit;
	}

	raw_spin_lock_irqsave(&xdbc.lock, flags);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
	xdbc.xhci_base = base;
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

	return 0;

free_and_quit:
	xdbc_free_ring(&xdbc.evt_ring);
	xdbc_free_ring(&xdbc.out_ring);
	xdbc_free_ring(&xdbc.in_ring);
	memblock_free(xdbc.table_dma, PAGE_SIZE);
	memblock_free(xdbc.out_dma, PAGE_SIZE);
	writel(0, &xdbc.xdbc_reg->control);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

	return ret;
}
subsys_initcall(xdbc_init);