// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc_node(max_packet, flags,
					       dev_to_node(dev));
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
					struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next,
			       enum xhci_ring_type type, bool chain_links)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		if (chain_links)
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
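/*
 * Illustrative sketch (not from the spec): after xhci_link_segments(A, B, ...),
 * segment A ends in a Link TRB that carries B's DMA base address:
 *
 *	A->trbs[0 .. TRBS_PER_SEGMENT-2]	normal TRBs
 *	A->trbs[TRBS_PER_SEGMENT-1]		Link TRB, segment_ptr = B->dma
 *
 * Linking the last segment back to the first is what turns the segment list
 * into a ring.
 */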
/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;
	bool chain_links;

	if (!ring || !first || !last)
		return;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (ring->type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	next = ring->enq_seg->next;
	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
	xhci_link_segments(last, next, ring->type, chain_links);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/*
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to. We need to do this because the host controller won't tell
 * us which stream ring the TRB came from. We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses. For example, say I
 * have segments of size 1KB, that are always 1KB aligned. A segment may
 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key. On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
 */
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
				       struct xhci_ring *ring,
				       struct xhci_segment *seg,
				       gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();
	return ret;
}
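/*
 * Reverse of xhci_insert_segment_mapping(): drop the segment's key from the
 * radix tree, if it was previously inserted.
 */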
static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

void xhci_initialize_ring_info(struct xhci_ring *ring,
			       unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
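/*
 * Worked example (illustrative, assuming TRBS_PER_SEGMENT == 256): a
 * two-segment ring offers 2 * 255 usable TRBs (one link TRB per segment),
 * minus the one TRB reserved for SW accounting, so num_trbs_free starts
 * at 509.
 */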
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;
	bool chain_links;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(prev, next, type, chain_links);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, *first, type, chain_links);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}
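/*
 * Typical usage (illustrative): endpoint rings start out with two segments
 * and a cycle state of 1, e.g.
 *
 *	ring = xhci_ring_alloc(xhci, 2, 1, TYPE_BULK, max_packet, GFP_KERNEL);
 *
 * and grow later through xhci_ring_expansion() when a TD does not fit.
 */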
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
			     struct xhci_virt_device *virt_dev,
			     unsigned int ep_index)
{
	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
	virt_dev->eps[ep_index].ring = NULL;
}

/*
 * Expand an existing ring.
 * Allocate a new set of segments and link them into the existing ring.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
			unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}
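/*
 * Sizing example (illustrative, assuming TRBS_PER_SEGMENT == 256): asking
 * for room for 300 TRBs needs ceil(300 / 255) = 2 new segments; a ring that
 * already has 3 segments grows by 3 instead, doubling its size.
 */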
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
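/*
 * Layout example (illustrative, 32-byte contexts): in a device context,
 * EP0 (ep_index 0) sits one context past the slot context, at bytes + 32;
 * in an input context the input control context comes first, so the same
 * endpoint lands at bytes + 2 * 32.
 */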
/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size, dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				      mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				      mem_flags, dma);
}
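/*
 * Sizing example (illustrative, assuming SMALL_STREAM_ARRAY_SIZE == 256 and
 * MEDIUM_STREAM_ARRAY_SIZE == 1024 as in xhci.h, with 16-byte stream
 * contexts): 16 streams need 256 bytes and come from the small pool, 64
 * streams need 1024 bytes and come from the medium pool, and anything
 * larger falls back to dma_alloc_coherent().
 */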
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id >= ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

/*
 * Change an endpoint's internal structure so it supports stream IDs. The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use. This is because the number of
 * stream context array entries must be a power of two.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
			dev_to_node(dev));
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kcalloc_node(
			num_streams, sizeof(struct xhci_ring *), mem_flags,
			dev_to_node(dev));
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx_array;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/*
	 * Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/*
	 * Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero. This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx_array:
	xhci_free_stream_ctx(xhci, num_stream_ctxs,
			stream_info->stream_ctx_array,
			stream_info->ctx_array_dma);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
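/*
 * Worked example (illustrative): with num_stream_ctxs == 8,
 * fls(8) - 2 == 2, so MaxPStreams is set to 2 and the array size the xHC
 * decodes is 2^(2 + 1) == 8 entries.
 */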
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/*
 * Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
				     struct xhci_virt_ep *ep)
{
	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    0);
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
			      struct xhci_virt_device *virt_dev,
			      int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
				dev_to_node(dev));
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}
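/*
 * Note (illustrative): a multi-TT hub gets one xhci_tt_bw_info per port,
 * all sharing the hub's slot_id; that is why xhci_free_tt_info() deletes
 * every list entry matching the slot rather than a single one.
 */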
/*
 * All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	trace_xhci_free_virt_device(dev);

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	if (dev->udev && dev->udev->slot_id)
		dev->udev->slot_id = 0;
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the child first. Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */
static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	if (vdev->real_port == 0 ||
			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;
	}

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list? */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
							xhci, i);
			}
		}
	}
out:
	/* we are now at a leaf device */
	xhci_debugfs_remove_slot(xhci, slot_id);
	xhci_free_virt_device(xhci, slot_id);
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
			   struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	xhci->devs[slot_id] = dev;

	return 1;
fail:
	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer. This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}
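/*
 * Illustrative topology walk: for root_hub -> hubA -> hubB -> udev, the
 * parent/parent->parent loop below stops at hubA, the device directly
 * below the root hub; hubA's portnum is then translated into an index
 * into the HW port status registers.
 */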
/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to). All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers. Call xhci_find_raw_port_number() to get the real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT. An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	trace_xhci_setup_addressable_virt_device(dev);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}


static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes". If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		fallthrough;	/* SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */
		fallthrough;

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {

			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return interval;
}
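/*
 * Worked example (illustrative): a full speed isoc endpoint with
 * bInterval 4 yields exponent (4 - 1) + 3 = 6, i.e. a service interval of
 * 2^6 * 125us = 8ms, matching the descriptor's 2^(4-1) = 8 frames.
 */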
/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed < USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
				       struct usb_host_endpoint *ep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (udev->speed >= USB_SPEED_SUPER)
		return ep->ss_ep_comp.bMaxBurst;

	if (udev->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(&ep->desc) ||
	     usb_endpoint_xfer_int(&ep->desc)))
		return usb_endpoint_maxp_mult(&ep->desc) - 1;

	return 0;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}

/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
	else if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);

	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * max_burst;
}
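/*
 * Worked example (illustrative): a high speed isoc endpoint advertising
 * wMaxPacketSize 1024 with two additional transaction opportunities per
 * microframe has usb_endpoint_maxp_mult() == 3, so its max ESIT payload is
 * 1024 * 3 = 3072 bytes.
 */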
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from ep descriptor.
	 * The average TRB buffer length for bulk endpoints is unclear as we
	 * have no clue on scatter gather list entry size. For Isoc and Int,
	 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);

	/* Periodic endpoint bInterval limit quirk */
	if (usb_endpoint_xfer_int(&ep->desc) ||
	    usb_endpoint_xfer_isoc(&ep->desc)) {
		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
		    udev->speed >= USB_SPEED_HIGH &&
		    interval >= 7) {
			interval = 6;
		}
	}

	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
	if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (udev->speed == USB_SPEED_HIGH)
			max_packet = 512;
		if (udev->speed == USB_SPEED_FULL) {
			max_packet = rounddown_pow_of_two(max_packet);
			max_packet = clamp_val(max_packet, 8, 64);
		}
	}
	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;

	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	return 0;
}
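/*
 * Illustrative outcome: for a high speed bulk IN endpoint the context
 * above ends up with EP_TYPE(BULK_IN_EP), MAX_PACKET(512), CErr == 3, and
 * a dequeue pointer at the new ring's first segment with the cycle bit
 * set.
 */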
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
	if (xhci->quirks & XHCI_MTK_HOST) {
		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
	}
}
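/*
 * Note (hedged): the XHCI_MTK_HOST branch above also copies the reserved
 * DWORDs because the MediaTek host keeps its own bandwidth scheduler state
 * there; on other hosts those fields stay zero.
 */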
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command. Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
				dev_to_node(dev));
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
					flags, dev_to_node(dev));
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
					       flags);
		if (!buf)
			goto fail_sp4;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
	}

	return 0;

fail_sp4:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}

	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
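/*
 * Layout example (illustrative): with HCS_MAX_SCRATCHPAD == 4 and 4KB
 * pages, scratchpad_alloc() builds one 4 * 8-byte DMA pointer array plus
 * four 4KB buffers; dcbaa entry 0, which the spec reserves for the
 * scratchpad, points at the pointer array.
 */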
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
	if (!command)
		return NULL;

	if (allocate_completion) {
		command->completion =
			kzalloc_node(sizeof(struct completion), mem_flags,
				dev_to_node(dev));
		if (!command->completion) {
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
	if (!command)
		return NULL;

	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						   mem_flags);
	if (!command->in_ctx) {
		kfree(command->completion);
		kfree(command);
		return NULL;
	}
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		       struct xhci_command *command)
{
	xhci_free_container_ctx(xhci, command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

int xhci_alloc_erst(struct xhci_hcd *xhci,
		    struct xhci_ring *evt_ring,
		    struct xhci_erst *erst,
		    gfp_t flags)
{
	size_t size;
	unsigned int val;
	struct xhci_segment *seg;
	struct xhci_erst_entry *entry;

	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
					   size, &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}
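/*
 * Worked example (illustrative): a two-segment event ring needs an ERST of
 * two entries, each holding one segment's DMA base address and its size in
 * TRBs (TRBS_PER_SEGMENT), so the coherent allocation above is
 * 2 * sizeof(struct xhci_erst_entry) bytes.
 */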
segment pool"); 1888 1889 dma_pool_destroy(xhci->device_pool); 1890 xhci->device_pool = NULL; 1891 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool"); 1892 1893 dma_pool_destroy(xhci->small_streams_pool); 1894 xhci->small_streams_pool = NULL; 1895 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1896 "Freed small stream array pool"); 1897 1898 dma_pool_destroy(xhci->medium_streams_pool); 1899 xhci->medium_streams_pool = NULL; 1900 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1901 "Freed medium stream array pool"); 1902 1903 if (xhci->dcbaa) 1904 dma_free_coherent(dev, sizeof(*xhci->dcbaa), 1905 xhci->dcbaa, xhci->dcbaa->dma); 1906 xhci->dcbaa = NULL; 1907 1908 scratchpad_free(xhci); 1909 1910 if (!xhci->rh_bw) 1911 goto no_bw; 1912 1913 for (i = 0; i < num_ports; i++) { 1914 struct xhci_tt_bw_info *tt, *n; 1915 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { 1916 list_del(&tt->tt_list); 1917 kfree(tt); 1918 } 1919 } 1920 1921 no_bw: 1922 xhci->cmd_ring_reserved_trbs = 0; 1923 xhci->usb2_rhub.num_ports = 0; 1924 xhci->usb3_rhub.num_ports = 0; 1925 xhci->num_active_eps = 0; 1926 kfree(xhci->usb2_rhub.ports); 1927 kfree(xhci->usb3_rhub.ports); 1928 kfree(xhci->hw_ports); 1929 kfree(xhci->rh_bw); 1930 kfree(xhci->ext_caps); 1931 for (i = 0; i < xhci->num_port_caps; i++) 1932 kfree(xhci->port_caps[i].psi); 1933 kfree(xhci->port_caps); 1934 xhci->num_port_caps = 0; 1935 1936 xhci->usb2_rhub.ports = NULL; 1937 xhci->usb3_rhub.ports = NULL; 1938 xhci->hw_ports = NULL; 1939 xhci->rh_bw = NULL; 1940 xhci->ext_caps = NULL; 1941 1942 xhci->page_size = 0; 1943 xhci->page_shift = 0; 1944 xhci->usb2_rhub.bus_state.bus_suspended = 0; 1945 xhci->usb3_rhub.bus_state.bus_suspended = 0; 1946 } 1947 1948 static int xhci_test_trb_in_td(struct xhci_hcd *xhci, 1949 struct xhci_segment *input_seg, 1950 union xhci_trb *start_trb, 1951 union xhci_trb *end_trb, 1952 dma_addr_t input_dma, 1953 struct xhci_segment *result_seg, 1954 char *test_name, int test_number) 1955 { 1956 unsigned long long start_dma; 1957 unsigned long long end_dma; 1958 struct xhci_segment *seg; 1959 1960 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); 1961 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); 1962 1963 seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false); 1964 if (seg != result_seg) { 1965 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", 1966 test_name, test_number); 1967 xhci_warn(xhci, "Tested TRB math w/ seg %p and " 1968 "input DMA 0x%llx\n", 1969 input_seg, 1970 (unsigned long long) input_dma); 1971 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " 1972 "ending TRB %p (0x%llx DMA)\n", 1973 start_trb, start_dma, 1974 end_trb, end_dma); 1975 xhci_warn(xhci, "Expected seg %p, got seg %p\n", 1976 result_seg, seg); 1977 trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, 1978 true); 1979 return -1; 1980 } 1981 return 0; 1982 } 1983 1984 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. 
*/ 1985 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci) 1986 { 1987 struct { 1988 dma_addr_t input_dma; 1989 struct xhci_segment *result_seg; 1990 } simple_test_vector [] = { 1991 /* A zeroed DMA field should fail */ 1992 { 0, NULL }, 1993 /* One TRB before the ring start should fail */ 1994 { xhci->event_ring->first_seg->dma - 16, NULL }, 1995 /* One byte before the ring start should fail */ 1996 { xhci->event_ring->first_seg->dma - 1, NULL }, 1997 /* Starting TRB should succeed */ 1998 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, 1999 /* Ending TRB should succeed */ 2000 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, 2001 xhci->event_ring->first_seg }, 2002 /* One byte after the ring end should fail */ 2003 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, 2004 /* One TRB after the ring end should fail */ 2005 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, 2006 /* An address of all ones should fail */ 2007 { (dma_addr_t) (~0), NULL }, 2008 }; 2009 struct { 2010 struct xhci_segment *input_seg; 2011 union xhci_trb *start_trb; 2012 union xhci_trb *end_trb; 2013 dma_addr_t input_dma; 2014 struct xhci_segment *result_seg; 2015 } complex_test_vector [] = { 2016 /* Test feeding a valid DMA address from a different ring */ 2017 { .input_seg = xhci->event_ring->first_seg, 2018 .start_trb = xhci->event_ring->first_seg->trbs, 2019 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 2020 .input_dma = xhci->cmd_ring->first_seg->dma, 2021 .result_seg = NULL, 2022 }, 2023 /* Test feeding a valid end TRB from a different ring */ 2024 { .input_seg = xhci->event_ring->first_seg, 2025 .start_trb = xhci->event_ring->first_seg->trbs, 2026 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 2027 .input_dma = xhci->cmd_ring->first_seg->dma, 2028 .result_seg = NULL, 2029 }, 2030 /* Test feeding a valid start and end TRB from a different ring */ 2031 { .input_seg = xhci->event_ring->first_seg, 2032 .start_trb = xhci->cmd_ring->first_seg->trbs, 2033 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 2034 .input_dma = xhci->cmd_ring->first_seg->dma, 2035 .result_seg = NULL, 2036 }, 2037 /* TRB in this ring, but after this TD */ 2038 { .input_seg = xhci->event_ring->first_seg, 2039 .start_trb = &xhci->event_ring->first_seg->trbs[0], 2040 .end_trb = &xhci->event_ring->first_seg->trbs[3], 2041 .input_dma = xhci->event_ring->first_seg->dma + 4*16, 2042 .result_seg = NULL, 2043 }, 2044 /* TRB in this ring, but before this TD */ 2045 { .input_seg = xhci->event_ring->first_seg, 2046 .start_trb = &xhci->event_ring->first_seg->trbs[3], 2047 .end_trb = &xhci->event_ring->first_seg->trbs[6], 2048 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 2049 .result_seg = NULL, 2050 }, 2051 /* TRB in this ring, but after this wrapped TD */ 2052 { .input_seg = xhci->event_ring->first_seg, 2053 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 2054 .end_trb = &xhci->event_ring->first_seg->trbs[1], 2055 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 2056 .result_seg = NULL, 2057 }, 2058 /* TRB in this ring, but before this wrapped TD */ 2059 { .input_seg = xhci->event_ring->first_seg, 2060 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 2061 .end_trb = &xhci->event_ring->first_seg->trbs[1], 2062 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, 2063 .result_seg = NULL, 2064 }, 2065 /* TRB not in this ring, and we 
have a wrapped TD */ 2066 { .input_seg = xhci->event_ring->first_seg, 2067 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 2068 .end_trb = &xhci->event_ring->first_seg->trbs[1], 2069 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, 2070 .result_seg = NULL, 2071 }, 2072 }; 2073 2074 unsigned int num_tests; 2075 int i, ret; 2076 2077 num_tests = ARRAY_SIZE(simple_test_vector); 2078 for (i = 0; i < num_tests; i++) { 2079 ret = xhci_test_trb_in_td(xhci, 2080 xhci->event_ring->first_seg, 2081 xhci->event_ring->first_seg->trbs, 2082 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 2083 simple_test_vector[i].input_dma, 2084 simple_test_vector[i].result_seg, 2085 "Simple", i); 2086 if (ret < 0) 2087 return ret; 2088 } 2089 2090 num_tests = ARRAY_SIZE(complex_test_vector); 2091 for (i = 0; i < num_tests; i++) { 2092 ret = xhci_test_trb_in_td(xhci, 2093 complex_test_vector[i].input_seg, 2094 complex_test_vector[i].start_trb, 2095 complex_test_vector[i].end_trb, 2096 complex_test_vector[i].input_dma, 2097 complex_test_vector[i].result_seg, 2098 "Complex", i); 2099 if (ret < 0) 2100 return ret; 2101 } 2102 xhci_dbg(xhci, "TRB math tests passed.\n"); 2103 return 0; 2104 } 2105 2106 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) 2107 { 2108 u64 temp; 2109 dma_addr_t deq; 2110 2111 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2112 xhci->event_ring->dequeue); 2113 if (deq == 0 && !in_interrupt()) 2114 xhci_warn(xhci, "WARN something wrong with SW event ring " 2115 "dequeue ptr.\n"); 2116 /* Update HC event ring dequeue pointer */ 2117 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2118 temp &= ERST_PTR_MASK; 2119 /* Don't clear the EHB bit (which is RW1C) because 2120 * there might be more events to service. 2121 */ 2122 temp &= ~ERST_EHB; 2123 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2124 "// Write event ring dequeue pointer, " 2125 "preserving EHB bit"); 2126 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, 2127 &xhci->ir_set->erst_dequeue); 2128 } 2129 2130 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, 2131 __le32 __iomem *addr, int max_caps) 2132 { 2133 u32 temp, port_offset, port_count; 2134 int i; 2135 u8 major_revision, minor_revision; 2136 struct xhci_hub *rhub; 2137 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2138 struct xhci_port_cap *port_cap; 2139 2140 temp = readl(addr); 2141 major_revision = XHCI_EXT_PORT_MAJOR(temp); 2142 minor_revision = XHCI_EXT_PORT_MINOR(temp); 2143 2144 if (major_revision == 0x03) { 2145 rhub = &xhci->usb3_rhub; 2146 } else if (major_revision <= 0x02) { 2147 rhub = &xhci->usb2_rhub; 2148 } else { 2149 xhci_warn(xhci, "Ignoring unknown port speed, " 2150 "Ext Cap %p, revision = 0x%x\n", 2151 addr, major_revision); 2152 /* Ignoring port protocol we can't understand. FIXME */ 2153 return; 2154 } 2155 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); 2156 2157 if (rhub->min_rev < minor_revision) 2158 rhub->min_rev = minor_revision; 2159 2160 /* Port offset and count in the third dword, see section 7.2 */ 2161 temp = readl(addr + 2); 2162 port_offset = XHCI_EXT_PORT_OFF(temp); 2163 port_count = XHCI_EXT_PORT_COUNT(temp); 2164 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2165 "Ext Cap %p, port offset = %u, " 2166 "count = %u, revision = 0x%x", 2167 addr, port_offset, port_count, major_revision); 2168 /* Port count includes the current port offset */ 2169 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) 2170 /* WTF? 
"Valid values are ‘1’ to MaxPorts" */ 2171 return; 2172 2173 port_cap = &xhci->port_caps[xhci->num_port_caps++]; 2174 if (xhci->num_port_caps > max_caps) 2175 return; 2176 2177 port_cap->maj_rev = major_revision; 2178 port_cap->min_rev = minor_revision; 2179 port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp); 2180 2181 if (port_cap->psi_count) { 2182 port_cap->psi = kcalloc_node(port_cap->psi_count, 2183 sizeof(*port_cap->psi), 2184 GFP_KERNEL, dev_to_node(dev)); 2185 if (!port_cap->psi) 2186 port_cap->psi_count = 0; 2187 2188 port_cap->psi_uid_count++; 2189 for (i = 0; i < port_cap->psi_count; i++) { 2190 port_cap->psi[i] = readl(addr + 4 + i); 2191 2192 /* count unique ID values, two consecutive entries can 2193 * have the same ID if link is assymetric 2194 */ 2195 if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) != 2196 XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1]))) 2197 port_cap->psi_uid_count++; 2198 2199 xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", 2200 XHCI_EXT_PORT_PSIV(port_cap->psi[i]), 2201 XHCI_EXT_PORT_PSIE(port_cap->psi[i]), 2202 XHCI_EXT_PORT_PLT(port_cap->psi[i]), 2203 XHCI_EXT_PORT_PFD(port_cap->psi[i]), 2204 XHCI_EXT_PORT_LP(port_cap->psi[i]), 2205 XHCI_EXT_PORT_PSIM(port_cap->psi[i])); 2206 } 2207 } 2208 /* cache usb2 port capabilities */ 2209 if (major_revision < 0x03 && xhci->num_ext_caps < max_caps) 2210 xhci->ext_caps[xhci->num_ext_caps++] = temp; 2211 2212 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) && 2213 (temp & XHCI_HLC)) { 2214 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2215 "xHCI 1.0: support USB2 hardware lpm"); 2216 xhci->hw_lpm_support = 1; 2217 } 2218 2219 port_offset--; 2220 for (i = port_offset; i < (port_offset + port_count); i++) { 2221 struct xhci_port *hw_port = &xhci->hw_ports[i]; 2222 /* Duplicate entry. Ignore the port if the revisions differ. */ 2223 if (hw_port->rhub) { 2224 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," 2225 " port %u\n", addr, i); 2226 xhci_warn(xhci, "Port was marked as USB %u, " 2227 "duplicated as USB %u\n", 2228 hw_port->rhub->maj_rev, major_revision); 2229 /* Only adjust the roothub port counts if we haven't 2230 * found a similar duplicate. 2231 */ 2232 if (hw_port->rhub != rhub && 2233 hw_port->hcd_portnum != DUPLICATE_ENTRY) { 2234 hw_port->rhub->num_ports--; 2235 hw_port->hcd_portnum = DUPLICATE_ENTRY; 2236 } 2237 continue; 2238 } 2239 hw_port->rhub = rhub; 2240 hw_port->port_cap = port_cap; 2241 rhub->num_ports++; 2242 } 2243 /* FIXME: Should we disable ports not in the Extended Capabilities? */ 2244 } 2245 2246 static void xhci_create_rhub_port_array(struct xhci_hcd *xhci, 2247 struct xhci_hub *rhub, gfp_t flags) 2248 { 2249 int port_index = 0; 2250 int i; 2251 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2252 2253 if (!rhub->num_ports) 2254 return; 2255 rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports), 2256 flags, dev_to_node(dev)); 2257 for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) { 2258 if (xhci->hw_ports[i].rhub != rhub || 2259 xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY) 2260 continue; 2261 xhci->hw_ports[i].hcd_portnum = port_index; 2262 rhub->ports[port_index] = &xhci->hw_ports[i]; 2263 port_index++; 2264 if (port_index == rhub->num_ports) 2265 break; 2266 } 2267 } 2268 2269 /* 2270 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that 2271 * specify what speeds each port is supposed to be. 
We can't count on the port 2272 * speed bits in the PORTSC register being correct until a device is connected, 2273 * but we need to set up the two fake roothubs with the correct number of USB 2274 * 3.0 and USB 2.0 ports at host controller initialization time. 2275 */ 2276 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) 2277 { 2278 void __iomem *base; 2279 u32 offset; 2280 unsigned int num_ports; 2281 int i, j; 2282 int cap_count = 0; 2283 u32 cap_start; 2284 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2285 2286 num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 2287 xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports), 2288 flags, dev_to_node(dev)); 2289 if (!xhci->hw_ports) 2290 return -ENOMEM; 2291 2292 for (i = 0; i < num_ports; i++) { 2293 xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base + 2294 NUM_PORT_REGS * i; 2295 xhci->hw_ports[i].hw_portnum = i; 2296 } 2297 2298 xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags, 2299 dev_to_node(dev)); 2300 if (!xhci->rh_bw) 2301 return -ENOMEM; 2302 for (i = 0; i < num_ports; i++) { 2303 struct xhci_interval_bw_table *bw_table; 2304 2305 INIT_LIST_HEAD(&xhci->rh_bw[i].tts); 2306 bw_table = &xhci->rh_bw[i].bw_table; 2307 for (j = 0; j < XHCI_MAX_INTERVAL; j++) 2308 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); 2309 } 2310 base = &xhci->cap_regs->hc_capbase; 2311 2312 cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL); 2313 if (!cap_start) { 2314 xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n"); 2315 return -ENODEV; 2316 } 2317 2318 offset = cap_start; 2319 /* count extended protocol capability entries for later caching */ 2320 while (offset) { 2321 cap_count++; 2322 offset = xhci_find_next_ext_cap(base, offset, 2323 XHCI_EXT_CAPS_PROTOCOL); 2324 } 2325 2326 xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps), 2327 flags, dev_to_node(dev)); 2328 if (!xhci->ext_caps) 2329 return -ENOMEM; 2330 2331 xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps), 2332 flags, dev_to_node(dev)); 2333 if (!xhci->port_caps) 2334 return -ENOMEM; 2335 2336 offset = cap_start; 2337 2338 while (offset) { 2339 xhci_add_in_port(xhci, num_ports, base + offset, cap_count); 2340 if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports == 2341 num_ports) 2342 break; 2343 offset = xhci_find_next_ext_cap(base, offset, 2344 XHCI_EXT_CAPS_PROTOCOL); 2345 } 2346 if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) { 2347 xhci_warn(xhci, "No ports on the roothubs?\n"); 2348 return -ENODEV; 2349 } 2350 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2351 "Found %u USB 2.0 ports and %u USB 3.0 ports.", 2352 xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports); 2353 2354 /* Place limits on the number of roothub ports so that the hub 2355 * descriptors aren't longer than the USB core will allocate. 2356 */ 2357 if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) { 2358 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2359 "Limiting USB 3.0 roothub ports to %u.", 2360 USB_SS_MAXPORTS); 2361 xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS; 2362 } 2363 if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) { 2364 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2365 "Limiting USB 2.0 roothub ports to %u.", 2366 USB_MAXCHILDREN); 2367 xhci->usb2_rhub.num_ports = USB_MAXCHILDREN; 2368 } 2369 2370 /* 2371 * Note we could have all USB 3.0 ports, or all USB 2.0 ports. 2372 * Not sure how the USB core will handle a hub with no ports... 
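(An illustrative compile-time check follows.) */

	/*
	 * Illustrative only, no runtime effect: the per-port bookkeeping
	 * in struct xhci_bus_state (resume_done[], rexit_done[]) is sized
	 * by USB_MAXCHILDREN, so the USB 3.0 clamp above must also fit
	 * within that bound.
	 */
	BUILD_BUG_ON(USB_SS_MAXPORTS > USB_MAXCHILDREN);

	/*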
2373 */ 2374 2375 xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags); 2376 xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags); 2377 2378 return 0; 2379 } 2380 2381 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 2382 { 2383 dma_addr_t dma; 2384 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2385 unsigned int val, val2; 2386 u64 val_64; 2387 u32 page_size, temp; 2388 int i, ret; 2389 2390 INIT_LIST_HEAD(&xhci->cmd_list); 2391 2392 /* init command timeout work */ 2393 INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); 2394 init_completion(&xhci->cmd_ring_stop_completion); 2395 2396 page_size = readl(&xhci->op_regs->page_size); 2397 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2398 "Supported page size register = 0x%x", page_size); 2399 for (i = 0; i < 16; i++) { 2400 if ((0x1 & page_size) != 0) 2401 break; 2402 page_size = page_size >> 1; 2403 } 2404 if (i < 16) 2405 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2406 "Supported page size of %iK", (1 << (i+12)) / 1024); 2407 else 2408 xhci_warn(xhci, "WARN: no supported page size\n"); 2409 /* Use 4K pages, since that's common and the minimum the HC supports */ 2410 xhci->page_shift = 12; 2411 xhci->page_size = 1 << xhci->page_shift; 2412 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2413 "HCD page size set to %iK", xhci->page_size / 1024); 2414 2415 /* 2416 * Program the Number of Device Slots Enabled field in the CONFIG 2417 * register with the max value of slots the HC can handle. 2418 */ 2419 val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1)); 2420 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2421 "// xHC can handle at most %d device slots.", val); 2422 val2 = readl(&xhci->op_regs->config_reg); 2423 val |= (val2 & ~HCS_SLOTS_MASK); 2424 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2425 "// Setting Max device slots reg = 0x%x.", val); 2426 writel(val, &xhci->op_regs->config_reg); 2427 2428 /* 2429 * xHCI section 5.4.6 - the Device Context Base Address Array must be 2430 * physically contiguous and 64-byte aligned. 2431 */ 2432 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, 2433 flags); 2434 if (!xhci->dcbaa) 2435 goto fail; 2436 xhci->dcbaa->dma = dma; 2437 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2438 "// Device context base array address = 0x%llx (DMA), %p (virt)", 2439 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 2440 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); 2441 2442 /* 2443 * Initialize the ring segment pool. The ring must be a contiguous 2444 * structure composed of TRBs. The TRBs must be 16-byte aligned, 2445 * but the command ring segments need 64-byte alignment, and our use 2446 * of dma addresses in the trb_address_map radix tree needs 2447 * TRB_SEGMENT_SIZE alignment, so pick the greater alignment need. 2448 */ 2449 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 2450 TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); 2451 2452 /* See Table 46 and Note on Figure 55 */ 2453 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2454 2112, 64, xhci->page_size); 2455 if (!xhci->segment_pool || !xhci->device_pool) 2456 goto fail; 2457 2458 /* Linear stream context arrays don't have any boundary restrictions, 2459 * and only need to be 16-byte aligned. 
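An illustrative compile-time check of the sizes involved follows. */

	/*
	 * Illustrative only, no runtime effect: stream contexts are 16
	 * bytes each (see struct xhci_stream_ctx in xhci.h), so the
	 * "small" pool below holds SMALL_STREAM_ARRAY_SIZE / 16 contexts
	 * per array and the "medium" pool MEDIUM_STREAM_ARRAY_SIZE / 16.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 16);
	BUILD_BUG_ON(SMALL_STREAM_ARRAY_SIZE % sizeof(struct xhci_stream_ctx));

	/*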
2460 */ 2461 xhci->small_streams_pool = 2462 dma_pool_create("xHCI 256 byte stream ctx arrays", 2463 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); 2464 xhci->medium_streams_pool = 2465 dma_pool_create("xHCI 1KB stream ctx arrays", 2466 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); 2467 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE 2468 * will be allocated with dma_alloc_coherent() 2469 */ 2470 2471 if (!xhci->small_streams_pool || !xhci->medium_streams_pool) 2472 goto fail; 2473 2474 /* Set up the command ring to have one segment for now. */ 2475 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags); 2476 if (!xhci->cmd_ring) 2477 goto fail; 2478 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2479 "Allocated command ring at %p", xhci->cmd_ring); 2480 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx", 2481 (unsigned long long)xhci->cmd_ring->first_seg->dma); 2482 2483 /* Set the address in the Command Ring Control register */ 2484 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 2485 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 2486 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 2487 xhci->cmd_ring->cycle_state; 2488 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2489 "// Setting command ring address to 0x%016llx", val_64); 2490 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 2491 2492 xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags); 2493 if (!xhci->lpm_command) 2494 goto fail; 2495 2496 /* Reserve one command ring TRB for disabling LPM. 2497 * Since the USB core grabs the shared usb_bus bandwidth mutex before 2498 * disabling LPM, we only need to reserve one TRB for all devices. 2499 */ 2500 xhci->cmd_ring_reserved_trbs++; 2501 2502 val = readl(&xhci->cap_regs->db_off); 2503 val &= DBOFF_MASK; 2504 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2505 "// Doorbell array is located at offset 0x%x" 2506 " from cap regs base addr", val); 2507 xhci->dba = (void __iomem *) xhci->cap_regs + val; 2508 /* Set ir_set to interrupt register set 0 */ 2509 xhci->ir_set = &xhci->run_regs->ir_set[0]; 2510 2511 /* 2512 * Event ring setup: Allocate a normal ring, but also set up 2513 * the event ring segment table (ERST). Section 4.9.3. 
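An illustrative compile-time check of the ERST entry layout follows. */

	/*
	 * Illustrative only, no runtime effect: each ERST entry is a
	 * 16-byte {segment base, segment size, reserved} record, which is
	 * the unit xhci_alloc_erst() sizes its coherent buffer by.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 16);

	/*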
2514 */ 2515 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring"); 2516 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 2517 0, flags); 2518 if (!xhci->event_ring) 2519 goto fail; 2520 if (xhci_check_trb_in_td_math(xhci) < 0) 2521 goto fail; 2522 2523 ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags); 2524 if (ret) 2525 goto fail; 2526 2527 /* set ERST count with the number of entries in the segment table */ 2528 val = readl(&xhci->ir_set->erst_size); 2529 val &= ERST_SIZE_MASK; 2530 val |= ERST_NUM_SEGS; 2531 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2532 "// Write ERST size = %i to ir_set 0 (some bits preserved)", 2533 val); 2534 writel(val, &xhci->ir_set->erst_size); 2535 2536 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2537 "// Set ERST entries to point to event ring."); 2538 /* set the segment table base address */ 2539 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2540 "// Set ERST base address for ir_set 0 = 0x%llx", 2541 (unsigned long long)xhci->erst.erst_dma_addr); 2542 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); 2543 val_64 &= ERST_PTR_MASK; 2544 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); 2545 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); 2546 2547 /* Set the event ring dequeue address */ 2548 xhci_set_hc_event_deq(xhci); 2549 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2550 "Wrote ERST address to ir_set 0."); 2551 2552 /* 2553 * XXX: Might need to set the Interrupter Moderation Register to 2554 * something other than the default (~1ms minimum between interrupts). 2555 * See section 5.5.1.2. 2556 */ 2557 for (i = 0; i < MAX_HC_SLOTS; i++) 2558 xhci->devs[i] = NULL; 2559 for (i = 0; i < USB_MAXCHILDREN; i++) { 2560 xhci->usb2_rhub.bus_state.resume_done[i] = 0; 2561 xhci->usb3_rhub.bus_state.resume_done[i] = 0; 2562 /* Only the USB 2.0 completions will ever be used. */ 2563 init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]); 2564 init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]); 2565 } 2566 2567 if (scratchpad_alloc(xhci, flags)) 2568 goto fail; 2569 if (xhci_setup_port_arrays(xhci, flags)) 2570 goto fail; 2571 2572 /* Enable USB 3.0 device notifications for function remote wake, which 2573 * is necessary for allowing USB 3.0 devices to do remote wakeup from 2574 * U3 (device suspend). 2575 */ 2576 temp = readl(&xhci->op_regs->dev_notification); 2577 temp &= ~DEV_NOTE_MASK; 2578 temp |= DEV_NOTE_FWAKE; 2579 writel(temp, &xhci->op_regs->dev_notification); 2580 2581 return 0; 2582 2583 fail: 2584 xhci_halt(xhci); 2585 xhci_reset(xhci); 2586 xhci_mem_cleanup(xhci); 2587 return -ENOMEM; 2588 } 2589
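/*
 * Hedged sketch, not wired up anywhere: one plausible way to program the
 * Interrupter Moderation register mentioned in the XXX note in
 * xhci_mem_init() (xHCI section 5.5.1.2). The IMODI field counts in
 * 250 ns units; ER_IRQ_INTERVAL_MASK comes from xhci.h, while the
 * function name and the imod_ns parameter are invented for illustration.
 * Real code would pick the interval from a quirk or module parameter.
 */
static void __maybe_unused xhci_example_set_imod(struct xhci_hcd *xhci,
						 unsigned int imod_ns)
{
	u32 imod = readl(&xhci->ir_set->irq_control);

	imod &= ~ER_IRQ_INTERVAL_MASK;
	imod |= (imod_ns / 250) & ER_IRQ_INTERVAL_MASK;
	writel(imod, &xhci->ir_set->irq_control);
}

/*
 * Hedged sketch of how the scratchpad count used by scratchpad_alloc()
 * is packed into HCSPARAMS2: "Max Scratchpad Bufs Hi" lives in bits
 * 25:21 and "Max Scratchpad Bufs Lo" in bits 31:27. This mirrors the
 * HCS_MAX_SCRATCHPAD() macro from xhci.h; the function name is invented
 * for illustration.
 */
static unsigned int __maybe_unused xhci_example_max_scratchpad(u32 hcs_params2)
{
	return ((hcs_params2 >> 16) & 0x3e0) |	/* Hi -> bits 9:5 */
	       ((hcs_params2 >> 27) & 0x1f);	/* Lo -> bits 4:0 */
}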