1 /* 2 * xHCI host controller driver 3 * 4 * Copyright (C) 2008 Intel Corp. 5 * 6 * Author: Sarah Sharp 7 * Some code borrowed from the Linux EHCI driver. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16 * for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software Foundation, 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/usb.h> 24 #include <linux/pci.h> 25 #include <linux/slab.h> 26 #include <linux/dmapool.h> 27 28 #include "xhci.h" 29 30 /* 31 * Allocates a generic ring segment from the ring pool, sets the dma address, 32 * initializes the segment to zero, and sets the private next pointer to NULL. 33 * 34 * Section 4.11.1.1: 35 * "All components of all Command and Transfer TRBs shall be initialized to '0'" 36 */ 37 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, 38 unsigned int cycle_state, gfp_t flags) 39 { 40 struct xhci_segment *seg; 41 dma_addr_t dma; 42 int i; 43 44 seg = kzalloc(sizeof *seg, flags); 45 if (!seg) 46 return NULL; 47 48 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); 49 if (!seg->trbs) { 50 kfree(seg); 51 return NULL; 52 } 53 54 memset(seg->trbs, 0, TRB_SEGMENT_SIZE); 55 /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ 56 if (cycle_state == 0) { 57 for (i = 0; i < TRBS_PER_SEGMENT; i++) 58 seg->trbs[i].link.control |= TRB_CYCLE; 59 } 60 seg->dma = dma; 61 seg->next = NULL; 62 63 return seg; 64 } 65 66 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) 67 { 68 if (seg->trbs) { 69 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); 70 seg->trbs = NULL; 71 } 72 kfree(seg); 73 } 74 75 static void xhci_free_segments_for_ring(struct xhci_hcd *xhci, 76 struct xhci_segment *first) 77 { 78 struct xhci_segment *seg; 79 80 seg = first->next; 81 while (seg != first) { 82 struct xhci_segment *next = seg->next; 83 xhci_segment_free(xhci, seg); 84 seg = next; 85 } 86 xhci_segment_free(xhci, first); 87 } 88 89 /* 90 * Make the prev segment point to the next segment. 91 * 92 * Change the last TRB in the prev segment to be a Link TRB which points to the 93 * DMA address of the next segment. The caller needs to set any Link TRB 94 * related flags, such as End TRB, Toggle Cycle, and no snoop. 95 */ 96 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, 97 struct xhci_segment *next, enum xhci_ring_type type) 98 { 99 u32 val; 100 101 if (!prev || !next) 102 return; 103 prev->next = next; 104 if (type != TYPE_EVENT) { 105 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = 106 cpu_to_le64(next->dma); 107 108 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ 109 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); 110 val &= ~TRB_TYPE_BITMASK; 111 val |= TRB_TYPE(TRB_LINK); 112 /* Always set the chain bit with 0.95 hardware */ 113 /* Set chain bit for isoc rings on AMD 0.96 host */ 114 if (xhci_link_trb_quirk(xhci) || 115 (type == TYPE_ISOC && 116 (xhci->quirks & XHCI_AMD_0x96_HOST))) 117 val |= TRB_CHAIN; 118 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); 119 } 120 } 121 122 /* 123 * Link the ring to the new segments. 124 * Set Toggle Cycle for the new ring if needed. 125 */ 126 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, 127 struct xhci_segment *first, struct xhci_segment *last, 128 unsigned int num_segs) 129 { 130 struct xhci_segment *next; 131 132 if (!ring || !first || !last) 133 return; 134 135 next = ring->enq_seg->next; 136 xhci_link_segments(xhci, ring->enq_seg, first, ring->type); 137 xhci_link_segments(xhci, last, next, ring->type); 138 ring->num_segs += num_segs; 139 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; 140 141 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { 142 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control 143 &= ~cpu_to_le32(LINK_TOGGLE); 144 last->trbs[TRBS_PER_SEGMENT-1].link.control 145 |= cpu_to_le32(LINK_TOGGLE); 146 ring->last_seg = last; 147 } 148 } 149 150 /* XXX: Do we need the hcd structure in all these functions? */ 151 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) 152 { 153 if (!ring) 154 return; 155 156 if (ring->first_seg) 157 xhci_free_segments_for_ring(xhci, ring->first_seg); 158 159 kfree(ring); 160 } 161 162 static void xhci_initialize_ring_info(struct xhci_ring *ring, 163 unsigned int cycle_state) 164 { 165 /* The ring is empty, so the enqueue pointer == dequeue pointer */ 166 ring->enqueue = ring->first_seg->trbs; 167 ring->enq_seg = ring->first_seg; 168 ring->dequeue = ring->enqueue; 169 ring->deq_seg = ring->first_seg; 170 /* The ring is initialized to 0. The producer must write 1 to the cycle 171 * bit to handover ownership of the TRB, so PCS = 1. The consumer must 172 * compare CCS to the cycle bit to check ownership, so CCS = 1. 173 * 174 * New rings are initialized with cycle state equal to 1; if we are 175 * handling ring expansion, set the cycle state equal to the old ring. 176 */ 177 ring->cycle_state = cycle_state; 178 /* Not necessary for new rings, but needed for re-initialized rings */ 179 ring->enq_updates = 0; 180 ring->deq_updates = 0; 181 182 /* 183 * Each segment has a link TRB, and leave an extra TRB for SW 184 * accounting purpose 185 */ 186 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; 187 } 188 189 /* Allocate segments and link them for a ring */ 190 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, 191 struct xhci_segment **first, struct xhci_segment **last, 192 unsigned int num_segs, unsigned int cycle_state, 193 enum xhci_ring_type type, gfp_t flags) 194 { 195 struct xhci_segment *prev; 196 197 prev = xhci_segment_alloc(xhci, cycle_state, flags); 198 if (!prev) 199 return -ENOMEM; 200 num_segs--; 201 202 *first = prev; 203 while (num_segs > 0) { 204 struct xhci_segment *next; 205 206 next = xhci_segment_alloc(xhci, cycle_state, flags); 207 if (!next) { 208 prev = *first; 209 while (prev) { 210 next = prev->next; 211 xhci_segment_free(xhci, prev); 212 prev = next; 213 } 214 return -ENOMEM; 215 } 216 xhci_link_segments(xhci, prev, next, type); 217 218 prev = next; 219 num_segs--; 220 } 221 xhci_link_segments(xhci, prev, *first, type); 222 *last = prev; 223 224 return 0; 225 } 226 227 /** 228 * Create a new ring with zero or more segments. 229 * 230 * Link each segment together into a ring. 231 * Set the end flag and the cycle toggle bit on the last segment. 232 * See section 4.9.1 and figures 15 and 16. 233 */ 234 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, 235 unsigned int num_segs, unsigned int cycle_state, 236 enum xhci_ring_type type, gfp_t flags) 237 { 238 struct xhci_ring *ring; 239 int ret; 240 241 ring = kzalloc(sizeof *(ring), flags); 242 if (!ring) 243 return NULL; 244 245 ring->num_segs = num_segs; 246 INIT_LIST_HEAD(&ring->td_list); 247 ring->type = type; 248 if (num_segs == 0) 249 return ring; 250 251 ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, 252 &ring->last_seg, num_segs, cycle_state, type, flags); 253 if (ret) 254 goto fail; 255 256 /* Only event ring does not use link TRB */ 257 if (type != TYPE_EVENT) { 258 /* See section 4.9.2.1 and 6.4.4.1 */ 259 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= 260 cpu_to_le32(LINK_TOGGLE); 261 } 262 xhci_initialize_ring_info(ring, cycle_state); 263 return ring; 264 265 fail: 266 kfree(ring); 267 return NULL; 268 } 269 270 void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, 271 struct xhci_virt_device *virt_dev, 272 unsigned int ep_index) 273 { 274 int rings_cached; 275 276 rings_cached = virt_dev->num_rings_cached; 277 if (rings_cached < XHCI_MAX_RINGS_CACHED) { 278 virt_dev->ring_cache[rings_cached] = 279 virt_dev->eps[ep_index].ring; 280 virt_dev->num_rings_cached++; 281 xhci_dbg(xhci, "Cached old ring, " 282 "%d ring%s cached\n", 283 virt_dev->num_rings_cached, 284 (virt_dev->num_rings_cached > 1) ? "s" : ""); 285 } else { 286 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); 287 xhci_dbg(xhci, "Ring cache full (%d rings), " 288 "freeing ring\n", 289 virt_dev->num_rings_cached); 290 } 291 virt_dev->eps[ep_index].ring = NULL; 292 } 293 294 /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue 295 * pointers to the beginning of the ring. 296 */ 297 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, 298 struct xhci_ring *ring, unsigned int cycle_state, 299 enum xhci_ring_type type) 300 { 301 struct xhci_segment *seg = ring->first_seg; 302 int i; 303 304 do { 305 memset(seg->trbs, 0, 306 sizeof(union xhci_trb)*TRBS_PER_SEGMENT); 307 if (cycle_state == 0) { 308 for (i = 0; i < TRBS_PER_SEGMENT; i++) 309 seg->trbs[i].link.control |= TRB_CYCLE; 310 } 311 /* All endpoint rings have link TRBs */ 312 xhci_link_segments(xhci, seg, seg->next, type); 313 seg = seg->next; 314 } while (seg != ring->first_seg); 315 ring->type = type; 316 xhci_initialize_ring_info(ring, cycle_state); 317 /* td list should be empty since all URBs have been cancelled, 318 * but just in case... 319 */ 320 INIT_LIST_HEAD(&ring->td_list); 321 } 322 323 /* 324 * Expand an existing ring. 325 * Look for a cached ring or allocate a new ring which has same segment numbers 326 * and link the two rings. 327 */ 328 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, 329 unsigned int num_trbs, gfp_t flags) 330 { 331 struct xhci_segment *first; 332 struct xhci_segment *last; 333 unsigned int num_segs; 334 unsigned int num_segs_needed; 335 int ret; 336 337 num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / 338 (TRBS_PER_SEGMENT - 1); 339 340 /* Allocate number of segments we needed, or double the ring size */ 341 num_segs = ring->num_segs > num_segs_needed ? 342 ring->num_segs : num_segs_needed; 343 344 ret = xhci_alloc_segments_for_ring(xhci, &first, &last, 345 num_segs, ring->cycle_state, ring->type, flags); 346 if (ret) 347 return -ENOMEM; 348 349 xhci_link_rings(xhci, ring, first, last, num_segs); 350 xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n", 351 ring->num_segs); 352 353 return 0; 354 } 355 356 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 357 358 static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 359 int type, gfp_t flags) 360 { 361 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); 362 if (!ctx) 363 return NULL; 364 365 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); 366 ctx->type = type; 367 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; 368 if (type == XHCI_CTX_TYPE_INPUT) 369 ctx->size += CTX_SIZE(xhci->hcc_params); 370 371 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); 372 memset(ctx->bytes, 0, ctx->size); 373 return ctx; 374 } 375 376 static void xhci_free_container_ctx(struct xhci_hcd *xhci, 377 struct xhci_container_ctx *ctx) 378 { 379 if (!ctx) 380 return; 381 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); 382 kfree(ctx); 383 } 384 385 struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, 386 struct xhci_container_ctx *ctx) 387 { 388 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); 389 return (struct xhci_input_control_ctx *)ctx->bytes; 390 } 391 392 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, 393 struct xhci_container_ctx *ctx) 394 { 395 if (ctx->type == XHCI_CTX_TYPE_DEVICE) 396 return (struct xhci_slot_ctx *)ctx->bytes; 397 398 return (struct xhci_slot_ctx *) 399 (ctx->bytes + CTX_SIZE(xhci->hcc_params)); 400 } 401 402 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, 403 struct xhci_container_ctx *ctx, 404 unsigned int ep_index) 405 { 406 /* increment ep index by offset of start of ep ctx array */ 407 ep_index++; 408 if (ctx->type == XHCI_CTX_TYPE_INPUT) 409 ep_index++; 410 411 return (struct xhci_ep_ctx *) 412 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 413 } 414 415 416 /***************** Streams structures manipulation *************************/ 417 418 static void xhci_free_stream_ctx(struct xhci_hcd *xhci, 419 unsigned int num_stream_ctxs, 420 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) 421 { 422 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 423 424 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) 425 dma_free_coherent(&pdev->dev, 426 sizeof(struct xhci_stream_ctx)*num_stream_ctxs, 427 stream_ctx, dma); 428 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) 429 return dma_pool_free(xhci->small_streams_pool, 430 stream_ctx, dma); 431 else 432 return dma_pool_free(xhci->medium_streams_pool, 433 stream_ctx, dma); 434 } 435 436 /* 437 * The stream context array for each endpoint with bulk streams enabled can 438 * vary in size, based on: 439 * - how many streams the endpoint supports, 440 * - the maximum primary stream array size the host controller supports, 441 * - and how many streams the device driver asks for. 442 * 443 * The stream context array must be a power of 2, and can be as small as 444 * 64 bytes or as large as 1MB. 445 */ 446 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, 447 unsigned int num_stream_ctxs, dma_addr_t *dma, 448 gfp_t mem_flags) 449 { 450 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 451 452 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) 453 return dma_alloc_coherent(&pdev->dev, 454 sizeof(struct xhci_stream_ctx)*num_stream_ctxs, 455 dma, mem_flags); 456 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) 457 return dma_pool_alloc(xhci->small_streams_pool, 458 mem_flags, dma); 459 else 460 return dma_pool_alloc(xhci->medium_streams_pool, 461 mem_flags, dma); 462 } 463 464 struct xhci_ring *xhci_dma_to_transfer_ring( 465 struct xhci_virt_ep *ep, 466 u64 address) 467 { 468 if (ep->ep_state & EP_HAS_STREAMS) 469 return radix_tree_lookup(&ep->stream_info->trb_address_map, 470 address >> TRB_SEGMENT_SHIFT); 471 return ep->ring; 472 } 473 474 /* Only use this when you know stream_info is valid */ 475 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 476 static struct xhci_ring *dma_to_stream_ring( 477 struct xhci_stream_info *stream_info, 478 u64 address) 479 { 480 return radix_tree_lookup(&stream_info->trb_address_map, 481 address >> TRB_SEGMENT_SHIFT); 482 } 483 #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ 484 485 struct xhci_ring *xhci_stream_id_to_ring( 486 struct xhci_virt_device *dev, 487 unsigned int ep_index, 488 unsigned int stream_id) 489 { 490 struct xhci_virt_ep *ep = &dev->eps[ep_index]; 491 492 if (stream_id == 0) 493 return ep->ring; 494 if (!ep->stream_info) 495 return NULL; 496 497 if (stream_id > ep->stream_info->num_streams) 498 return NULL; 499 return ep->stream_info->stream_rings[stream_id]; 500 } 501 502 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 503 static int xhci_test_radix_tree(struct xhci_hcd *xhci, 504 unsigned int num_streams, 505 struct xhci_stream_info *stream_info) 506 { 507 u32 cur_stream; 508 struct xhci_ring *cur_ring; 509 u64 addr; 510 511 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 512 struct xhci_ring *mapped_ring; 513 int trb_size = sizeof(union xhci_trb); 514 515 cur_ring = stream_info->stream_rings[cur_stream]; 516 for (addr = cur_ring->first_seg->dma; 517 addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE; 518 addr += trb_size) { 519 mapped_ring = dma_to_stream_ring(stream_info, addr); 520 if (cur_ring != mapped_ring) { 521 xhci_warn(xhci, "WARN: DMA address 0x%08llx " 522 "didn't map to stream ID %u; " 523 "mapped to ring %p\n", 524 (unsigned long long) addr, 525 cur_stream, 526 mapped_ring); 527 return -EINVAL; 528 } 529 } 530 /* One TRB after the end of the ring segment shouldn't return a 531 * pointer to the current ring (although it may be a part of a 532 * different ring). 533 */ 534 mapped_ring = dma_to_stream_ring(stream_info, addr); 535 if (mapped_ring != cur_ring) { 536 /* One TRB before should also fail */ 537 addr = cur_ring->first_seg->dma - trb_size; 538 mapped_ring = dma_to_stream_ring(stream_info, addr); 539 } 540 if (mapped_ring == cur_ring) { 541 xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx " 542 "mapped to valid stream ID %u; " 543 "mapped ring = %p\n", 544 (unsigned long long) addr, 545 cur_stream, 546 mapped_ring); 547 return -EINVAL; 548 } 549 } 550 return 0; 551 } 552 #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ 553 554 /* 555 * Change an endpoint's internal structure so it supports stream IDs. The 556 * number of requested streams includes stream 0, which cannot be used by device 557 * drivers. 558 * 559 * The number of stream contexts in the stream context array may be bigger than 560 * the number of streams the driver wants to use. This is because the number of 561 * stream context array entries must be a power of two. 562 * 563 * We need a radix tree for mapping physical addresses of TRBs to which stream 564 * ID they belong to. We need to do this because the host controller won't tell 565 * us which stream ring the TRB came from. We could store the stream ID in an 566 * event data TRB, but that doesn't help us for the cancellation case, since the 567 * endpoint may stop before it reaches that event data TRB. 568 * 569 * The radix tree maps the upper portion of the TRB DMA address to a ring 570 * segment that has the same upper portion of DMA addresses. For example, say I 571 * have segments of size 1KB, that are always 64-byte aligned. A segment may 572 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the 573 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to 574 * pass the radix tree a key to get the right stream ID: 575 * 576 * 0x10c90fff >> 10 = 0x43243 577 * 0x10c912c0 >> 10 = 0x43244 578 * 0x10c91400 >> 10 = 0x43245 579 * 580 * Obviously, only those TRBs with DMA addresses that are within the segment 581 * will make the radix tree return the stream ID for that ring. 582 * 583 * Caveats for the radix tree: 584 * 585 * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an 586 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be 587 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the 588 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit 589 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit 590 * extended systems (where the DMA address can be bigger than 32-bits), 591 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. 592 */ 593 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, 594 unsigned int num_stream_ctxs, 595 unsigned int num_streams, gfp_t mem_flags) 596 { 597 struct xhci_stream_info *stream_info; 598 u32 cur_stream; 599 struct xhci_ring *cur_ring; 600 unsigned long key; 601 u64 addr; 602 int ret; 603 604 xhci_dbg(xhci, "Allocating %u streams and %u " 605 "stream context array entries.\n", 606 num_streams, num_stream_ctxs); 607 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { 608 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); 609 return NULL; 610 } 611 xhci->cmd_ring_reserved_trbs++; 612 613 stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags); 614 if (!stream_info) 615 goto cleanup_trbs; 616 617 stream_info->num_streams = num_streams; 618 stream_info->num_stream_ctxs = num_stream_ctxs; 619 620 /* Initialize the array of virtual pointers to stream rings. */ 621 stream_info->stream_rings = kzalloc( 622 sizeof(struct xhci_ring *)*num_streams, 623 mem_flags); 624 if (!stream_info->stream_rings) 625 goto cleanup_info; 626 627 /* Initialize the array of DMA addresses for stream rings for the HW. */ 628 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, 629 num_stream_ctxs, &stream_info->ctx_array_dma, 630 mem_flags); 631 if (!stream_info->stream_ctx_array) 632 goto cleanup_ctx; 633 memset(stream_info->stream_ctx_array, 0, 634 sizeof(struct xhci_stream_ctx)*num_stream_ctxs); 635 636 /* Allocate everything needed to free the stream rings later */ 637 stream_info->free_streams_command = 638 xhci_alloc_command(xhci, true, true, mem_flags); 639 if (!stream_info->free_streams_command) 640 goto cleanup_ctx; 641 642 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); 643 644 /* Allocate rings for all the streams that the driver will use, 645 * and add their segment DMA addresses to the radix tree. 646 * Stream 0 is reserved. 647 */ 648 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 649 stream_info->stream_rings[cur_stream] = 650 xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags); 651 cur_ring = stream_info->stream_rings[cur_stream]; 652 if (!cur_ring) 653 goto cleanup_rings; 654 cur_ring->stream_id = cur_stream; 655 /* Set deq ptr, cycle bit, and stream context type */ 656 addr = cur_ring->first_seg->dma | 657 SCT_FOR_CTX(SCT_PRI_TR) | 658 cur_ring->cycle_state; 659 stream_info->stream_ctx_array[cur_stream].stream_ring = 660 cpu_to_le64(addr); 661 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", 662 cur_stream, (unsigned long long) addr); 663 664 key = (unsigned long) 665 (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT); 666 ret = radix_tree_insert(&stream_info->trb_address_map, 667 key, cur_ring); 668 if (ret) { 669 xhci_ring_free(xhci, cur_ring); 670 stream_info->stream_rings[cur_stream] = NULL; 671 goto cleanup_rings; 672 } 673 } 674 /* Leave the other unused stream ring pointers in the stream context 675 * array initialized to zero. This will cause the xHC to give us an 676 * error if the device asks for a stream ID we don't have setup (if it 677 * was any other way, the host controller would assume the ring is 678 * "empty" and wait forever for data to be queued to that stream ID). 679 */ 680 #if XHCI_DEBUG 681 /* Do a little test on the radix tree to make sure it returns the 682 * correct values. 683 */ 684 if (xhci_test_radix_tree(xhci, num_streams, stream_info)) 685 goto cleanup_rings; 686 #endif 687 688 return stream_info; 689 690 cleanup_rings: 691 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 692 cur_ring = stream_info->stream_rings[cur_stream]; 693 if (cur_ring) { 694 addr = cur_ring->first_seg->dma; 695 radix_tree_delete(&stream_info->trb_address_map, 696 addr >> TRB_SEGMENT_SHIFT); 697 xhci_ring_free(xhci, cur_ring); 698 stream_info->stream_rings[cur_stream] = NULL; 699 } 700 } 701 xhci_free_command(xhci, stream_info->free_streams_command); 702 cleanup_ctx: 703 kfree(stream_info->stream_rings); 704 cleanup_info: 705 kfree(stream_info); 706 cleanup_trbs: 707 xhci->cmd_ring_reserved_trbs--; 708 return NULL; 709 } 710 /* 711 * Sets the MaxPStreams field and the Linear Stream Array field. 712 * Sets the dequeue pointer to the stream context array. 713 */ 714 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, 715 struct xhci_ep_ctx *ep_ctx, 716 struct xhci_stream_info *stream_info) 717 { 718 u32 max_primary_streams; 719 /* MaxPStreams is the number of stream context array entries, not the 720 * number we're actually using. Must be in 2^(MaxPstreams + 1) format. 721 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. 722 */ 723 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; 724 xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", 725 1 << (max_primary_streams + 1)); 726 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); 727 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) 728 | EP_HAS_LSA); 729 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); 730 } 731 732 /* 733 * Sets the MaxPStreams field and the Linear Stream Array field to 0. 734 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark, 735 * not at the beginning of the ring). 736 */ 737 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, 738 struct xhci_ep_ctx *ep_ctx, 739 struct xhci_virt_ep *ep) 740 { 741 dma_addr_t addr; 742 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); 743 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); 744 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); 745 } 746 747 /* Frees all stream contexts associated with the endpoint, 748 * 749 * Caller should fix the endpoint context streams fields. 750 */ 751 void xhci_free_stream_info(struct xhci_hcd *xhci, 752 struct xhci_stream_info *stream_info) 753 { 754 int cur_stream; 755 struct xhci_ring *cur_ring; 756 dma_addr_t addr; 757 758 if (!stream_info) 759 return; 760 761 for (cur_stream = 1; cur_stream < stream_info->num_streams; 762 cur_stream++) { 763 cur_ring = stream_info->stream_rings[cur_stream]; 764 if (cur_ring) { 765 addr = cur_ring->first_seg->dma; 766 radix_tree_delete(&stream_info->trb_address_map, 767 addr >> TRB_SEGMENT_SHIFT); 768 xhci_ring_free(xhci, cur_ring); 769 stream_info->stream_rings[cur_stream] = NULL; 770 } 771 } 772 xhci_free_command(xhci, stream_info->free_streams_command); 773 xhci->cmd_ring_reserved_trbs--; 774 if (stream_info->stream_ctx_array) 775 xhci_free_stream_ctx(xhci, 776 stream_info->num_stream_ctxs, 777 stream_info->stream_ctx_array, 778 stream_info->ctx_array_dma); 779 780 if (stream_info) 781 kfree(stream_info->stream_rings); 782 kfree(stream_info); 783 } 784 785 786 /***************** Device context manipulation *************************/ 787 788 static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, 789 struct xhci_virt_ep *ep) 790 { 791 init_timer(&ep->stop_cmd_timer); 792 ep->stop_cmd_timer.data = (unsigned long) ep; 793 ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; 794 ep->xhci = xhci; 795 } 796 797 static void xhci_free_tt_info(struct xhci_hcd *xhci, 798 struct xhci_virt_device *virt_dev, 799 int slot_id) 800 { 801 struct list_head *tt_list_head; 802 struct xhci_tt_bw_info *tt_info, *next; 803 bool slot_found = false; 804 805 /* If the device never made it past the Set Address stage, 806 * it may not have the real_port set correctly. 807 */ 808 if (virt_dev->real_port == 0 || 809 virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { 810 xhci_dbg(xhci, "Bad real port.\n"); 811 return; 812 } 813 814 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); 815 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { 816 /* Multi-TT hubs will have more than one entry */ 817 if (tt_info->slot_id == slot_id) { 818 slot_found = true; 819 list_del(&tt_info->tt_list); 820 kfree(tt_info); 821 } else if (slot_found) { 822 break; 823 } 824 } 825 } 826 827 int xhci_alloc_tt_info(struct xhci_hcd *xhci, 828 struct xhci_virt_device *virt_dev, 829 struct usb_device *hdev, 830 struct usb_tt *tt, gfp_t mem_flags) 831 { 832 struct xhci_tt_bw_info *tt_info; 833 unsigned int num_ports; 834 int i, j; 835 836 if (!tt->multi) 837 num_ports = 1; 838 else 839 num_ports = hdev->maxchild; 840 841 for (i = 0; i < num_ports; i++, tt_info++) { 842 struct xhci_interval_bw_table *bw_table; 843 844 tt_info = kzalloc(sizeof(*tt_info), mem_flags); 845 if (!tt_info) 846 goto free_tts; 847 INIT_LIST_HEAD(&tt_info->tt_list); 848 list_add(&tt_info->tt_list, 849 &xhci->rh_bw[virt_dev->real_port - 1].tts); 850 tt_info->slot_id = virt_dev->udev->slot_id; 851 if (tt->multi) 852 tt_info->ttport = i+1; 853 bw_table = &tt_info->bw_table; 854 for (j = 0; j < XHCI_MAX_INTERVAL; j++) 855 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); 856 } 857 return 0; 858 859 free_tts: 860 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id); 861 return -ENOMEM; 862 } 863 864 865 /* All the xhci_tds in the ring's TD list should be freed at this point. 866 * Should be called with xhci->lock held if there is any chance the TT lists 867 * will be manipulated by the configure endpoint, allocate device, or update 868 * hub functions while this function is removing the TT entries from the list. 869 */ 870 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 871 { 872 struct xhci_virt_device *dev; 873 int i; 874 int old_active_eps = 0; 875 876 /* Slot ID 0 is reserved */ 877 if (slot_id == 0 || !xhci->devs[slot_id]) 878 return; 879 880 dev = xhci->devs[slot_id]; 881 xhci->dcbaa->dev_context_ptrs[slot_id] = 0; 882 if (!dev) 883 return; 884 885 if (dev->tt_info) 886 old_active_eps = dev->tt_info->active_eps; 887 888 for (i = 0; i < 31; ++i) { 889 if (dev->eps[i].ring) 890 xhci_ring_free(xhci, dev->eps[i].ring); 891 if (dev->eps[i].stream_info) 892 xhci_free_stream_info(xhci, 893 dev->eps[i].stream_info); 894 /* Endpoints on the TT/root port lists should have been removed 895 * when usb_disable_device() was called for the device. 896 * We can't drop them anyway, because the udev might have gone 897 * away by this point, and we can't tell what speed it was. 898 */ 899 if (!list_empty(&dev->eps[i].bw_endpoint_list)) 900 xhci_warn(xhci, "Slot %u endpoint %u " 901 "not removed from BW list!\n", 902 slot_id, i); 903 } 904 /* If this is a hub, free the TT(s) from the TT list */ 905 xhci_free_tt_info(xhci, dev, slot_id); 906 /* If necessary, update the number of active TTs on this root port */ 907 xhci_update_tt_active_eps(xhci, dev, old_active_eps); 908 909 if (dev->ring_cache) { 910 for (i = 0; i < dev->num_rings_cached; i++) 911 xhci_ring_free(xhci, dev->ring_cache[i]); 912 kfree(dev->ring_cache); 913 } 914 915 if (dev->in_ctx) 916 xhci_free_container_ctx(xhci, dev->in_ctx); 917 if (dev->out_ctx) 918 xhci_free_container_ctx(xhci, dev->out_ctx); 919 920 kfree(xhci->devs[slot_id]); 921 xhci->devs[slot_id] = NULL; 922 } 923 924 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 925 struct usb_device *udev, gfp_t flags) 926 { 927 struct xhci_virt_device *dev; 928 int i; 929 930 /* Slot ID 0 is reserved */ 931 if (slot_id == 0 || xhci->devs[slot_id]) { 932 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); 933 return 0; 934 } 935 936 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 937 if (!xhci->devs[slot_id]) 938 return 0; 939 dev = xhci->devs[slot_id]; 940 941 /* Allocate the (output) device context that will be used in the HC. */ 942 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 943 if (!dev->out_ctx) 944 goto fail; 945 946 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, 947 (unsigned long long)dev->out_ctx->dma); 948 949 /* Allocate the (input) device context for address device command */ 950 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); 951 if (!dev->in_ctx) 952 goto fail; 953 954 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 955 (unsigned long long)dev->in_ctx->dma); 956 957 /* Initialize the cancellation list and watchdog timers for each ep */ 958 for (i = 0; i < 31; i++) { 959 xhci_init_endpoint_timer(xhci, &dev->eps[i]); 960 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); 961 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); 962 } 963 964 /* Allocate endpoint 0 ring */ 965 dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags); 966 if (!dev->eps[0].ring) 967 goto fail; 968 969 /* Allocate pointers to the ring cache */ 970 dev->ring_cache = kzalloc( 971 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED, 972 flags); 973 if (!dev->ring_cache) 974 goto fail; 975 dev->num_rings_cached = 0; 976 977 init_completion(&dev->cmd_completion); 978 INIT_LIST_HEAD(&dev->cmd_list); 979 dev->udev = udev; 980 981 /* Point to output device context in dcbaa. */ 982 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); 983 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 984 slot_id, 985 &xhci->dcbaa->dev_context_ptrs[slot_id], 986 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); 987 988 return 1; 989 fail: 990 xhci_free_virt_device(xhci, slot_id); 991 return 0; 992 } 993 994 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, 995 struct usb_device *udev) 996 { 997 struct xhci_virt_device *virt_dev; 998 struct xhci_ep_ctx *ep0_ctx; 999 struct xhci_ring *ep_ring; 1000 1001 virt_dev = xhci->devs[udev->slot_id]; 1002 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); 1003 ep_ring = virt_dev->eps[0].ring; 1004 /* 1005 * FIXME we don't keep track of the dequeue pointer very well after a 1006 * Set TR dequeue pointer, so we're setting the dequeue pointer of the 1007 * host to our enqueue pointer. This should only be called after a 1008 * configured device has reset, so all control transfers should have 1009 * been completed or cancelled before the reset. 1010 */ 1011 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, 1012 ep_ring->enqueue) 1013 | ep_ring->cycle_state); 1014 } 1015 1016 /* 1017 * The xHCI roothub may have ports of differing speeds in any order in the port 1018 * status registers. xhci->port_array provides an array of the port speed for 1019 * each offset into the port status registers. 1020 * 1021 * The xHCI hardware wants to know the roothub port number that the USB device 1022 * is attached to (or the roothub port its ancestor hub is attached to). All we 1023 * know is the index of that port under either the USB 2.0 or the USB 3.0 1024 * roothub, but that doesn't give us the real index into the HW port status 1025 * registers. Call xhci_find_raw_port_number() to get real index. 1026 */ 1027 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, 1028 struct usb_device *udev) 1029 { 1030 struct usb_device *top_dev; 1031 struct usb_hcd *hcd; 1032 1033 if (udev->speed == USB_SPEED_SUPER) 1034 hcd = xhci->shared_hcd; 1035 else 1036 hcd = xhci->main_hcd; 1037 1038 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 1039 top_dev = top_dev->parent) 1040 /* Found device below root hub */; 1041 1042 return xhci_find_raw_port_number(hcd, top_dev->portnum); 1043 } 1044 1045 /* Setup an xHCI virtual device for a Set Address command */ 1046 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) 1047 { 1048 struct xhci_virt_device *dev; 1049 struct xhci_ep_ctx *ep0_ctx; 1050 struct xhci_slot_ctx *slot_ctx; 1051 u32 port_num; 1052 struct usb_device *top_dev; 1053 1054 dev = xhci->devs[udev->slot_id]; 1055 /* Slot ID 0 is reserved */ 1056 if (udev->slot_id == 0 || !dev) { 1057 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", 1058 udev->slot_id); 1059 return -EINVAL; 1060 } 1061 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 1062 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 1063 1064 /* 3) Only the control endpoint is valid - one endpoint context */ 1065 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); 1066 switch (udev->speed) { 1067 case USB_SPEED_SUPER: 1068 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); 1069 break; 1070 case USB_SPEED_HIGH: 1071 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS); 1072 break; 1073 case USB_SPEED_FULL: 1074 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS); 1075 break; 1076 case USB_SPEED_LOW: 1077 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS); 1078 break; 1079 case USB_SPEED_WIRELESS: 1080 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 1081 return -EINVAL; 1082 break; 1083 default: 1084 /* Speed was set earlier, this shouldn't happen. */ 1085 BUG(); 1086 } 1087 /* Find the root hub port this device is under */ 1088 port_num = xhci_find_real_port_number(xhci, udev); 1089 if (!port_num) 1090 return -EINVAL; 1091 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num)); 1092 /* Set the port number in the virtual_device to the faked port number */ 1093 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 1094 top_dev = top_dev->parent) 1095 /* Found device below root hub */; 1096 dev->fake_port = top_dev->portnum; 1097 dev->real_port = port_num; 1098 xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num); 1099 xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port); 1100 1101 /* Find the right bandwidth table that this device will be a part of. 1102 * If this is a full speed device attached directly to a root port (or a 1103 * decendent of one), it counts as a primary bandwidth domain, not a 1104 * secondary bandwidth domain under a TT. An xhci_tt_info structure 1105 * will never be created for the HS root hub. 1106 */ 1107 if (!udev->tt || !udev->tt->hub->parent) { 1108 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table; 1109 } else { 1110 struct xhci_root_port_bw_info *rh_bw; 1111 struct xhci_tt_bw_info *tt_bw; 1112 1113 rh_bw = &xhci->rh_bw[port_num - 1]; 1114 /* Find the right TT. */ 1115 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) { 1116 if (tt_bw->slot_id != udev->tt->hub->slot_id) 1117 continue; 1118 1119 if (!dev->udev->tt->multi || 1120 (udev->tt->multi && 1121 tt_bw->ttport == dev->udev->ttport)) { 1122 dev->bw_table = &tt_bw->bw_table; 1123 dev->tt_info = tt_bw; 1124 break; 1125 } 1126 } 1127 if (!dev->tt_info) 1128 xhci_warn(xhci, "WARN: Didn't find a matching TT\n"); 1129 } 1130 1131 /* Is this a LS/FS device under an external HS hub? */ 1132 if (udev->tt && udev->tt->hub->parent) { 1133 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id | 1134 (udev->ttport << 8)); 1135 if (udev->tt->multi) 1136 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 1137 } 1138 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 1139 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 1140 1141 /* Step 4 - ring already allocated */ 1142 /* Step 5 */ 1143 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); 1144 /* 1145 * XXX: Not sure about wireless USB devices. 1146 */ 1147 switch (udev->speed) { 1148 case USB_SPEED_SUPER: 1149 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512)); 1150 break; 1151 case USB_SPEED_HIGH: 1152 /* USB core guesses at a 64-byte max packet first for FS devices */ 1153 case USB_SPEED_FULL: 1154 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64)); 1155 break; 1156 case USB_SPEED_LOW: 1157 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8)); 1158 break; 1159 case USB_SPEED_WIRELESS: 1160 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 1161 return -EINVAL; 1162 break; 1163 default: 1164 /* New speed? */ 1165 BUG(); 1166 } 1167 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ 1168 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3)); 1169 1170 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | 1171 dev->eps[0].ring->cycle_state); 1172 1173 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 1174 1175 return 0; 1176 } 1177 1178 /* 1179 * Convert interval expressed as 2^(bInterval - 1) == interval into 1180 * straight exponent value 2^n == interval. 1181 * 1182 */ 1183 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, 1184 struct usb_host_endpoint *ep) 1185 { 1186 unsigned int interval; 1187 1188 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; 1189 if (interval != ep->desc.bInterval - 1) 1190 dev_warn(&udev->dev, 1191 "ep %#x - rounding interval to %d %sframes\n", 1192 ep->desc.bEndpointAddress, 1193 1 << interval, 1194 udev->speed == USB_SPEED_FULL ? "" : "micro"); 1195 1196 if (udev->speed == USB_SPEED_FULL) { 1197 /* 1198 * Full speed isoc endpoints specify interval in frames, 1199 * not microframes. We are using microframes everywhere, 1200 * so adjust accordingly. 1201 */ 1202 interval += 3; /* 1 frame = 2^3 uframes */ 1203 } 1204 1205 return interval; 1206 } 1207 1208 /* 1209 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of 1210 * microframes, rounded down to nearest power of 2. 1211 */ 1212 static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, 1213 struct usb_host_endpoint *ep, unsigned int desc_interval, 1214 unsigned int min_exponent, unsigned int max_exponent) 1215 { 1216 unsigned int interval; 1217 1218 interval = fls(desc_interval) - 1; 1219 interval = clamp_val(interval, min_exponent, max_exponent); 1220 if ((1 << interval) != desc_interval) 1221 dev_warn(&udev->dev, 1222 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", 1223 ep->desc.bEndpointAddress, 1224 1 << interval, 1225 desc_interval); 1226 1227 return interval; 1228 } 1229 1230 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, 1231 struct usb_host_endpoint *ep) 1232 { 1233 if (ep->desc.bInterval == 0) 1234 return 0; 1235 return xhci_microframes_to_exponent(udev, ep, 1236 ep->desc.bInterval, 0, 15); 1237 } 1238 1239 1240 static unsigned int xhci_parse_frame_interval(struct usb_device *udev, 1241 struct usb_host_endpoint *ep) 1242 { 1243 return xhci_microframes_to_exponent(udev, ep, 1244 ep->desc.bInterval * 8, 3, 10); 1245 } 1246 1247 /* Return the polling or NAK interval. 1248 * 1249 * The polling interval is expressed in "microframes". If xHCI's Interval field 1250 * is set to N, it will service the endpoint every 2^(Interval)*125us. 1251 * 1252 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval 1253 * is set to 0. 1254 */ 1255 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, 1256 struct usb_host_endpoint *ep) 1257 { 1258 unsigned int interval = 0; 1259 1260 switch (udev->speed) { 1261 case USB_SPEED_HIGH: 1262 /* Max NAK rate */ 1263 if (usb_endpoint_xfer_control(&ep->desc) || 1264 usb_endpoint_xfer_bulk(&ep->desc)) { 1265 interval = xhci_parse_microframe_interval(udev, ep); 1266 break; 1267 } 1268 /* Fall through - SS and HS isoc/int have same decoding */ 1269 1270 case USB_SPEED_SUPER: 1271 if (usb_endpoint_xfer_int(&ep->desc) || 1272 usb_endpoint_xfer_isoc(&ep->desc)) { 1273 interval = xhci_parse_exponent_interval(udev, ep); 1274 } 1275 break; 1276 1277 case USB_SPEED_FULL: 1278 if (usb_endpoint_xfer_isoc(&ep->desc)) { 1279 interval = xhci_parse_exponent_interval(udev, ep); 1280 break; 1281 } 1282 /* 1283 * Fall through for interrupt endpoint interval decoding 1284 * since it uses the same rules as low speed interrupt 1285 * endpoints. 1286 */ 1287 1288 case USB_SPEED_LOW: 1289 if (usb_endpoint_xfer_int(&ep->desc) || 1290 usb_endpoint_xfer_isoc(&ep->desc)) { 1291 1292 interval = xhci_parse_frame_interval(udev, ep); 1293 } 1294 break; 1295 1296 default: 1297 BUG(); 1298 } 1299 return EP_INTERVAL(interval); 1300 } 1301 1302 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. 1303 * High speed endpoint descriptors can define "the number of additional 1304 * transaction opportunities per microframe", but that goes in the Max Burst 1305 * endpoint context field. 1306 */ 1307 static u32 xhci_get_endpoint_mult(struct usb_device *udev, 1308 struct usb_host_endpoint *ep) 1309 { 1310 if (udev->speed != USB_SPEED_SUPER || 1311 !usb_endpoint_xfer_isoc(&ep->desc)) 1312 return 0; 1313 return ep->ss_ep_comp.bmAttributes; 1314 } 1315 1316 static u32 xhci_get_endpoint_type(struct usb_device *udev, 1317 struct usb_host_endpoint *ep) 1318 { 1319 int in; 1320 u32 type; 1321 1322 in = usb_endpoint_dir_in(&ep->desc); 1323 if (usb_endpoint_xfer_control(&ep->desc)) { 1324 type = EP_TYPE(CTRL_EP); 1325 } else if (usb_endpoint_xfer_bulk(&ep->desc)) { 1326 if (in) 1327 type = EP_TYPE(BULK_IN_EP); 1328 else 1329 type = EP_TYPE(BULK_OUT_EP); 1330 } else if (usb_endpoint_xfer_isoc(&ep->desc)) { 1331 if (in) 1332 type = EP_TYPE(ISOC_IN_EP); 1333 else 1334 type = EP_TYPE(ISOC_OUT_EP); 1335 } else if (usb_endpoint_xfer_int(&ep->desc)) { 1336 if (in) 1337 type = EP_TYPE(INT_IN_EP); 1338 else 1339 type = EP_TYPE(INT_OUT_EP); 1340 } else { 1341 BUG(); 1342 } 1343 return type; 1344 } 1345 1346 /* Return the maximum endpoint service interval time (ESIT) payload. 1347 * Basically, this is the maxpacket size, multiplied by the burst size 1348 * and mult size. 1349 */ 1350 static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, 1351 struct usb_device *udev, 1352 struct usb_host_endpoint *ep) 1353 { 1354 int max_burst; 1355 int max_packet; 1356 1357 /* Only applies for interrupt or isochronous endpoints */ 1358 if (usb_endpoint_xfer_control(&ep->desc) || 1359 usb_endpoint_xfer_bulk(&ep->desc)) 1360 return 0; 1361 1362 if (udev->speed == USB_SPEED_SUPER) 1363 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); 1364 1365 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); 1366 max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; 1367 /* A 0 in max burst means 1 transfer per ESIT */ 1368 return max_packet * (max_burst + 1); 1369 } 1370 1371 /* Set up an endpoint with one ring segment. Do not allocate stream rings. 1372 * Drivers will have to call usb_alloc_streams() to do that. 1373 */ 1374 int xhci_endpoint_init(struct xhci_hcd *xhci, 1375 struct xhci_virt_device *virt_dev, 1376 struct usb_device *udev, 1377 struct usb_host_endpoint *ep, 1378 gfp_t mem_flags) 1379 { 1380 unsigned int ep_index; 1381 struct xhci_ep_ctx *ep_ctx; 1382 struct xhci_ring *ep_ring; 1383 unsigned int max_packet; 1384 unsigned int max_burst; 1385 enum xhci_ring_type type; 1386 u32 max_esit_payload; 1387 1388 ep_index = xhci_get_endpoint_index(&ep->desc); 1389 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1390 1391 type = usb_endpoint_type(&ep->desc); 1392 /* Set up the endpoint ring */ 1393 virt_dev->eps[ep_index].new_ring = 1394 xhci_ring_alloc(xhci, 2, 1, type, mem_flags); 1395 if (!virt_dev->eps[ep_index].new_ring) { 1396 /* Attempt to use the ring cache */ 1397 if (virt_dev->num_rings_cached == 0) 1398 return -ENOMEM; 1399 virt_dev->eps[ep_index].new_ring = 1400 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1401 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1402 virt_dev->num_rings_cached--; 1403 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1404 1, type); 1405 } 1406 virt_dev->eps[ep_index].skip = false; 1407 ep_ring = virt_dev->eps[ep_index].new_ring; 1408 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state); 1409 1410 ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep) 1411 | EP_MULT(xhci_get_endpoint_mult(udev, ep))); 1412 1413 /* FIXME dig Mult and streams info out of ep companion desc */ 1414 1415 /* Allow 3 retries for everything but isoc; 1416 * CErr shall be set to 0 for Isoch endpoints. 1417 */ 1418 if (!usb_endpoint_xfer_isoc(&ep->desc)) 1419 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3)); 1420 else 1421 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0)); 1422 1423 ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); 1424 1425 /* Set the max packet size and max burst */ 1426 switch (udev->speed) { 1427 case USB_SPEED_SUPER: 1428 max_packet = usb_endpoint_maxp(&ep->desc); 1429 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); 1430 /* dig out max burst from ep companion desc */ 1431 max_packet = ep->ss_ep_comp.bMaxBurst; 1432 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); 1433 break; 1434 case USB_SPEED_HIGH: 1435 /* bits 11:12 specify the number of additional transaction 1436 * opportunities per microframe (USB 2.0, section 9.6.6) 1437 */ 1438 if (usb_endpoint_xfer_isoc(&ep->desc) || 1439 usb_endpoint_xfer_int(&ep->desc)) { 1440 max_burst = (usb_endpoint_maxp(&ep->desc) 1441 & 0x1800) >> 11; 1442 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); 1443 } 1444 /* Fall through */ 1445 case USB_SPEED_FULL: 1446 case USB_SPEED_LOW: 1447 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); 1448 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); 1449 break; 1450 default: 1451 BUG(); 1452 } 1453 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); 1454 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); 1455 1456 /* 1457 * XXX no idea how to calculate the average TRB buffer length for bulk 1458 * endpoints, as the driver gives us no clue how big each scatter gather 1459 * list entry (or buffer) is going to be. 1460 * 1461 * For isochronous and interrupt endpoints, we set it to the max 1462 * available, until we have new API in the USB core to allow drivers to 1463 * declare how much bandwidth they actually need. 1464 * 1465 * Normally, it would be calculated by taking the total of the buffer 1466 * lengths in the TD and then dividing by the number of TRBs in a TD, 1467 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't 1468 * use Event Data TRBs, and we don't chain in a link TRB on short 1469 * transfers, we're basically dividing by 1. 1470 * 1471 * xHCI 1.0 specification indicates that the Average TRB Length should 1472 * be set to 8 for control endpoints. 1473 */ 1474 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100) 1475 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); 1476 else 1477 ep_ctx->tx_info |= 1478 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload)); 1479 1480 /* FIXME Debug endpoint context */ 1481 return 0; 1482 } 1483 1484 void xhci_endpoint_zero(struct xhci_hcd *xhci, 1485 struct xhci_virt_device *virt_dev, 1486 struct usb_host_endpoint *ep) 1487 { 1488 unsigned int ep_index; 1489 struct xhci_ep_ctx *ep_ctx; 1490 1491 ep_index = xhci_get_endpoint_index(&ep->desc); 1492 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1493 1494 ep_ctx->ep_info = 0; 1495 ep_ctx->ep_info2 = 0; 1496 ep_ctx->deq = 0; 1497 ep_ctx->tx_info = 0; 1498 /* Don't free the endpoint ring until the set interface or configuration 1499 * request succeeds. 1500 */ 1501 } 1502 1503 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info) 1504 { 1505 bw_info->ep_interval = 0; 1506 bw_info->mult = 0; 1507 bw_info->num_packets = 0; 1508 bw_info->max_packet_size = 0; 1509 bw_info->type = 0; 1510 bw_info->max_esit_payload = 0; 1511 } 1512 1513 void xhci_update_bw_info(struct xhci_hcd *xhci, 1514 struct xhci_container_ctx *in_ctx, 1515 struct xhci_input_control_ctx *ctrl_ctx, 1516 struct xhci_virt_device *virt_dev) 1517 { 1518 struct xhci_bw_info *bw_info; 1519 struct xhci_ep_ctx *ep_ctx; 1520 unsigned int ep_type; 1521 int i; 1522 1523 for (i = 1; i < 31; ++i) { 1524 bw_info = &virt_dev->eps[i].bw_info; 1525 1526 /* We can't tell what endpoint type is being dropped, but 1527 * unconditionally clearing the bandwidth info for non-periodic 1528 * endpoints should be harmless because the info will never be 1529 * set in the first place. 1530 */ 1531 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) { 1532 /* Dropped endpoint */ 1533 xhci_clear_endpoint_bw_info(bw_info); 1534 continue; 1535 } 1536 1537 if (EP_IS_ADDED(ctrl_ctx, i)) { 1538 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i); 1539 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); 1540 1541 /* Ignore non-periodic endpoints */ 1542 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 1543 ep_type != ISOC_IN_EP && 1544 ep_type != INT_IN_EP) 1545 continue; 1546 1547 /* Added or changed endpoint */ 1548 bw_info->ep_interval = CTX_TO_EP_INTERVAL( 1549 le32_to_cpu(ep_ctx->ep_info)); 1550 /* Number of packets and mult are zero-based in the 1551 * input context, but we want one-based for the 1552 * interval table. 1553 */ 1554 bw_info->mult = CTX_TO_EP_MULT( 1555 le32_to_cpu(ep_ctx->ep_info)) + 1; 1556 bw_info->num_packets = CTX_TO_MAX_BURST( 1557 le32_to_cpu(ep_ctx->ep_info2)) + 1; 1558 bw_info->max_packet_size = MAX_PACKET_DECODED( 1559 le32_to_cpu(ep_ctx->ep_info2)); 1560 bw_info->type = ep_type; 1561 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD( 1562 le32_to_cpu(ep_ctx->tx_info)); 1563 } 1564 } 1565 } 1566 1567 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. 1568 * Useful when you want to change one particular aspect of the endpoint and then 1569 * issue a configure endpoint command. 1570 */ 1571 void xhci_endpoint_copy(struct xhci_hcd *xhci, 1572 struct xhci_container_ctx *in_ctx, 1573 struct xhci_container_ctx *out_ctx, 1574 unsigned int ep_index) 1575 { 1576 struct xhci_ep_ctx *out_ep_ctx; 1577 struct xhci_ep_ctx *in_ep_ctx; 1578 1579 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1580 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 1581 1582 in_ep_ctx->ep_info = out_ep_ctx->ep_info; 1583 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; 1584 in_ep_ctx->deq = out_ep_ctx->deq; 1585 in_ep_ctx->tx_info = out_ep_ctx->tx_info; 1586 } 1587 1588 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. 1589 * Useful when you want to change one particular aspect of the endpoint and then 1590 * issue a configure endpoint command. Only the context entries field matters, 1591 * but we'll copy the whole thing anyway. 1592 */ 1593 void xhci_slot_copy(struct xhci_hcd *xhci, 1594 struct xhci_container_ctx *in_ctx, 1595 struct xhci_container_ctx *out_ctx) 1596 { 1597 struct xhci_slot_ctx *in_slot_ctx; 1598 struct xhci_slot_ctx *out_slot_ctx; 1599 1600 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 1601 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); 1602 1603 in_slot_ctx->dev_info = out_slot_ctx->dev_info; 1604 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; 1605 in_slot_ctx->tt_info = out_slot_ctx->tt_info; 1606 in_slot_ctx->dev_state = out_slot_ctx->dev_state; 1607 } 1608 1609 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ 1610 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) 1611 { 1612 int i; 1613 struct device *dev = xhci_to_hcd(xhci)->self.controller; 1614 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1615 1616 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); 1617 1618 if (!num_sp) 1619 return 0; 1620 1621 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); 1622 if (!xhci->scratchpad) 1623 goto fail_sp; 1624 1625 xhci->scratchpad->sp_array = dma_alloc_coherent(dev, 1626 num_sp * sizeof(u64), 1627 &xhci->scratchpad->sp_dma, flags); 1628 if (!xhci->scratchpad->sp_array) 1629 goto fail_sp2; 1630 1631 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); 1632 if (!xhci->scratchpad->sp_buffers) 1633 goto fail_sp3; 1634 1635 xhci->scratchpad->sp_dma_buffers = 1636 kzalloc(sizeof(dma_addr_t) * num_sp, flags); 1637 1638 if (!xhci->scratchpad->sp_dma_buffers) 1639 goto fail_sp4; 1640 1641 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1642 for (i = 0; i < num_sp; i++) { 1643 dma_addr_t dma; 1644 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1645 flags); 1646 if (!buf) 1647 goto fail_sp5; 1648 1649 xhci->scratchpad->sp_array[i] = dma; 1650 xhci->scratchpad->sp_buffers[i] = buf; 1651 xhci->scratchpad->sp_dma_buffers[i] = dma; 1652 } 1653 1654 return 0; 1655 1656 fail_sp5: 1657 for (i = i - 1; i >= 0; i--) { 1658 dma_free_coherent(dev, xhci->page_size, 1659 xhci->scratchpad->sp_buffers[i], 1660 xhci->scratchpad->sp_dma_buffers[i]); 1661 } 1662 kfree(xhci->scratchpad->sp_dma_buffers); 1663 1664 fail_sp4: 1665 kfree(xhci->scratchpad->sp_buffers); 1666 1667 fail_sp3: 1668 dma_free_coherent(dev, num_sp * sizeof(u64), 1669 xhci->scratchpad->sp_array, 1670 xhci->scratchpad->sp_dma); 1671 1672 fail_sp2: 1673 kfree(xhci->scratchpad); 1674 xhci->scratchpad = NULL; 1675 1676 fail_sp: 1677 return -ENOMEM; 1678 } 1679 1680 static void scratchpad_free(struct xhci_hcd *xhci) 1681 { 1682 int num_sp; 1683 int i; 1684 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1685 1686 if (!xhci->scratchpad) 1687 return; 1688 1689 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1690 1691 for (i = 0; i < num_sp; i++) { 1692 dma_free_coherent(&pdev->dev, xhci->page_size, 1693 xhci->scratchpad->sp_buffers[i], 1694 xhci->scratchpad->sp_dma_buffers[i]); 1695 } 1696 kfree(xhci->scratchpad->sp_dma_buffers); 1697 kfree(xhci->scratchpad->sp_buffers); 1698 dma_free_coherent(&pdev->dev, num_sp * sizeof(u64), 1699 xhci->scratchpad->sp_array, 1700 xhci->scratchpad->sp_dma); 1701 kfree(xhci->scratchpad); 1702 xhci->scratchpad = NULL; 1703 } 1704 1705 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 1706 bool allocate_in_ctx, bool allocate_completion, 1707 gfp_t mem_flags) 1708 { 1709 struct xhci_command *command; 1710 1711 command = kzalloc(sizeof(*command), mem_flags); 1712 if (!command) 1713 return NULL; 1714 1715 if (allocate_in_ctx) { 1716 command->in_ctx = 1717 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, 1718 mem_flags); 1719 if (!command->in_ctx) { 1720 kfree(command); 1721 return NULL; 1722 } 1723 } 1724 1725 if (allocate_completion) { 1726 command->completion = 1727 kzalloc(sizeof(struct completion), mem_flags); 1728 if (!command->completion) { 1729 xhci_free_container_ctx(xhci, command->in_ctx); 1730 kfree(command); 1731 return NULL; 1732 } 1733 init_completion(command->completion); 1734 } 1735 1736 command->status = 0; 1737 INIT_LIST_HEAD(&command->cmd_list); 1738 return command; 1739 } 1740 1741 void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv) 1742 { 1743 if (urb_priv) { 1744 kfree(urb_priv->td[0]); 1745 kfree(urb_priv); 1746 } 1747 } 1748 1749 void xhci_free_command(struct xhci_hcd *xhci, 1750 struct xhci_command *command) 1751 { 1752 xhci_free_container_ctx(xhci, 1753 command->in_ctx); 1754 kfree(command->completion); 1755 kfree(command); 1756 } 1757 1758 void xhci_mem_cleanup(struct xhci_hcd *xhci) 1759 { 1760 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1761 struct dev_info *dev_info, *next; 1762 struct xhci_cd *cur_cd, *next_cd; 1763 unsigned long flags; 1764 int size; 1765 int i, j, num_ports; 1766 1767 /* Free the Event Ring Segment Table and the actual Event Ring */ 1768 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1769 if (xhci->erst.entries) 1770 dma_free_coherent(&pdev->dev, size, 1771 xhci->erst.entries, xhci->erst.erst_dma_addr); 1772 xhci->erst.entries = NULL; 1773 xhci_dbg(xhci, "Freed ERST\n"); 1774 if (xhci->event_ring) 1775 xhci_ring_free(xhci, xhci->event_ring); 1776 xhci->event_ring = NULL; 1777 xhci_dbg(xhci, "Freed event ring\n"); 1778 1779 if (xhci->lpm_command) 1780 xhci_free_command(xhci, xhci->lpm_command); 1781 xhci->cmd_ring_reserved_trbs = 0; 1782 if (xhci->cmd_ring) 1783 xhci_ring_free(xhci, xhci->cmd_ring); 1784 xhci->cmd_ring = NULL; 1785 xhci_dbg(xhci, "Freed command ring\n"); 1786 list_for_each_entry_safe(cur_cd, next_cd, 1787 &xhci->cancel_cmd_list, cancel_cmd_list) { 1788 list_del(&cur_cd->cancel_cmd_list); 1789 kfree(cur_cd); 1790 } 1791 1792 for (i = 1; i < MAX_HC_SLOTS; ++i) 1793 xhci_free_virt_device(xhci, i); 1794 1795 if (xhci->segment_pool) 1796 dma_pool_destroy(xhci->segment_pool); 1797 xhci->segment_pool = NULL; 1798 xhci_dbg(xhci, "Freed segment pool\n"); 1799 1800 if (xhci->device_pool) 1801 dma_pool_destroy(xhci->device_pool); 1802 xhci->device_pool = NULL; 1803 xhci_dbg(xhci, "Freed device context pool\n"); 1804 1805 if (xhci->small_streams_pool) 1806 dma_pool_destroy(xhci->small_streams_pool); 1807 xhci->small_streams_pool = NULL; 1808 xhci_dbg(xhci, "Freed small stream array pool\n"); 1809 1810 if (xhci->medium_streams_pool) 1811 dma_pool_destroy(xhci->medium_streams_pool); 1812 xhci->medium_streams_pool = NULL; 1813 xhci_dbg(xhci, "Freed medium stream array pool\n"); 1814 1815 if (xhci->dcbaa) 1816 dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa), 1817 xhci->dcbaa, xhci->dcbaa->dma); 1818 xhci->dcbaa = NULL; 1819 1820 scratchpad_free(xhci); 1821 1822 spin_lock_irqsave(&xhci->lock, flags); 1823 list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) { 1824 list_del(&dev_info->list); 1825 kfree(dev_info); 1826 } 1827 spin_unlock_irqrestore(&xhci->lock, flags); 1828 1829 num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1830 for (i = 0; i < num_ports; i++) { 1831 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; 1832 for (j = 0; j < XHCI_MAX_INTERVAL; j++) { 1833 struct list_head *ep = &bwt->interval_bw[j].endpoints; 1834 while (!list_empty(ep)) 1835 list_del_init(ep->next); 1836 } 1837 } 1838 1839 for (i = 0; i < num_ports; i++) { 1840 struct xhci_tt_bw_info *tt, *n; 1841 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { 1842 list_del(&tt->tt_list); 1843 kfree(tt); 1844 } 1845 } 1846 1847 xhci->num_usb2_ports = 0; 1848 xhci->num_usb3_ports = 0; 1849 xhci->num_active_eps = 0; 1850 kfree(xhci->usb2_ports); 1851 kfree(xhci->usb3_ports); 1852 kfree(xhci->port_array); 1853 kfree(xhci->rh_bw); 1854 1855 xhci->page_size = 0; 1856 xhci->page_shift = 0; 1857 xhci->bus_state[0].bus_suspended = 0; 1858 xhci->bus_state[1].bus_suspended = 0; 1859 } 1860 1861 static int xhci_test_trb_in_td(struct xhci_hcd *xhci, 1862 struct xhci_segment *input_seg, 1863 union xhci_trb *start_trb, 1864 union xhci_trb *end_trb, 1865 dma_addr_t input_dma, 1866 struct xhci_segment *result_seg, 1867 char *test_name, int test_number) 1868 { 1869 unsigned long long start_dma; 1870 unsigned long long end_dma; 1871 struct xhci_segment *seg; 1872 1873 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); 1874 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); 1875 1876 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma); 1877 if (seg != result_seg) { 1878 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", 1879 test_name, test_number); 1880 xhci_warn(xhci, "Tested TRB math w/ seg %p and " 1881 "input DMA 0x%llx\n", 1882 input_seg, 1883 (unsigned long long) input_dma); 1884 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " 1885 "ending TRB %p (0x%llx DMA)\n", 1886 start_trb, start_dma, 1887 end_trb, end_dma); 1888 xhci_warn(xhci, "Expected seg %p, got seg %p\n", 1889 result_seg, seg); 1890 return -1; 1891 } 1892 return 0; 1893 } 1894 1895 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */ 1896 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) 1897 { 1898 struct { 1899 dma_addr_t input_dma; 1900 struct xhci_segment *result_seg; 1901 } simple_test_vector [] = { 1902 /* A zeroed DMA field should fail */ 1903 { 0, NULL }, 1904 /* One TRB before the ring start should fail */ 1905 { xhci->event_ring->first_seg->dma - 16, NULL }, 1906 /* One byte before the ring start should fail */ 1907 { xhci->event_ring->first_seg->dma - 1, NULL }, 1908 /* Starting TRB should succeed */ 1909 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, 1910 /* Ending TRB should succeed */ 1911 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, 1912 xhci->event_ring->first_seg }, 1913 /* One byte after the ring end should fail */ 1914 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, 1915 /* One TRB after the ring end should fail */ 1916 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, 1917 /* An address of all ones should fail */ 1918 { (dma_addr_t) (~0), NULL }, 1919 }; 1920 struct { 1921 struct xhci_segment *input_seg; 1922 union xhci_trb *start_trb; 1923 union xhci_trb *end_trb; 1924 dma_addr_t input_dma; 1925 struct xhci_segment *result_seg; 1926 } complex_test_vector [] = { 1927 /* Test feeding a valid DMA address from a different ring */ 1928 { .input_seg = xhci->event_ring->first_seg, 1929 .start_trb = xhci->event_ring->first_seg->trbs, 1930 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1931 .input_dma = xhci->cmd_ring->first_seg->dma, 1932 .result_seg = NULL, 1933 }, 1934 /* Test feeding a valid end TRB from a different ring */ 1935 { .input_seg = xhci->event_ring->first_seg, 1936 .start_trb = xhci->event_ring->first_seg->trbs, 1937 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1938 .input_dma = xhci->cmd_ring->first_seg->dma, 1939 .result_seg = NULL, 1940 }, 1941 /* Test feeding a valid start and end TRB from a different ring */ 1942 { .input_seg = xhci->event_ring->first_seg, 1943 .start_trb = xhci->cmd_ring->first_seg->trbs, 1944 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1945 .input_dma = xhci->cmd_ring->first_seg->dma, 1946 .result_seg = NULL, 1947 }, 1948 /* TRB in this ring, but after this TD */ 1949 { .input_seg = xhci->event_ring->first_seg, 1950 .start_trb = &xhci->event_ring->first_seg->trbs[0], 1951 .end_trb = &xhci->event_ring->first_seg->trbs[3], 1952 .input_dma = xhci->event_ring->first_seg->dma + 4*16, 1953 .result_seg = NULL, 1954 }, 1955 /* TRB in this ring, but before this TD */ 1956 { .input_seg = xhci->event_ring->first_seg, 1957 .start_trb = &xhci->event_ring->first_seg->trbs[3], 1958 .end_trb = &xhci->event_ring->first_seg->trbs[6], 1959 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 1960 .result_seg = NULL, 1961 }, 1962 /* TRB in this ring, but after this wrapped TD */ 1963 { .input_seg = xhci->event_ring->first_seg, 1964 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1965 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1966 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 1967 .result_seg = NULL, 1968 }, 1969 /* TRB in this ring, but before this wrapped TD */ 1970 { .input_seg = xhci->event_ring->first_seg, 1971 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1972 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1973 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, 1974 .result_seg = NULL, 1975 }, 1976 /* TRB not in this ring, and we have a wrapped TD */ 1977 { .input_seg = xhci->event_ring->first_seg, 1978 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1979 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1980 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, 1981 .result_seg = NULL, 1982 }, 1983 }; 1984 1985 unsigned int num_tests; 1986 int i, ret; 1987 1988 num_tests = ARRAY_SIZE(simple_test_vector); 1989 for (i = 0; i < num_tests; i++) { 1990 ret = xhci_test_trb_in_td(xhci, 1991 xhci->event_ring->first_seg, 1992 xhci->event_ring->first_seg->trbs, 1993 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1994 simple_test_vector[i].input_dma, 1995 simple_test_vector[i].result_seg, 1996 "Simple", i); 1997 if (ret < 0) 1998 return ret; 1999 } 2000 2001 num_tests = ARRAY_SIZE(complex_test_vector); 2002 for (i = 0; i < num_tests; i++) { 2003 ret = xhci_test_trb_in_td(xhci, 2004 complex_test_vector[i].input_seg, 2005 complex_test_vector[i].start_trb, 2006 complex_test_vector[i].end_trb, 2007 complex_test_vector[i].input_dma, 2008 complex_test_vector[i].result_seg, 2009 "Complex", i); 2010 if (ret < 0) 2011 return ret; 2012 } 2013 xhci_dbg(xhci, "TRB math tests passed.\n"); 2014 return 0; 2015 } 2016 2017 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) 2018 { 2019 u64 temp; 2020 dma_addr_t deq; 2021 2022 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2023 xhci->event_ring->dequeue); 2024 if (deq == 0 && !in_interrupt()) 2025 xhci_warn(xhci, "WARN something wrong with SW event ring " 2026 "dequeue ptr.\n"); 2027 /* Update HC event ring dequeue pointer */ 2028 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2029 temp &= ERST_PTR_MASK; 2030 /* Don't clear the EHB bit (which is RW1C) because 2031 * there might be more events to service. 2032 */ 2033 temp &= ~ERST_EHB; 2034 xhci_dbg(xhci, "// Write event ring dequeue pointer, " 2035 "preserving EHB bit\n"); 2036 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, 2037 &xhci->ir_set->erst_dequeue); 2038 } 2039 2040 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, 2041 __le32 __iomem *addr, u8 major_revision) 2042 { 2043 u32 temp, port_offset, port_count; 2044 int i; 2045 2046 if (major_revision > 0x03) { 2047 xhci_warn(xhci, "Ignoring unknown port speed, " 2048 "Ext Cap %p, revision = 0x%x\n", 2049 addr, major_revision); 2050 /* Ignoring port protocol we can't understand. FIXME */ 2051 return; 2052 } 2053 2054 /* Port offset and count in the third dword, see section 7.2 */ 2055 temp = xhci_readl(xhci, addr + 2); 2056 port_offset = XHCI_EXT_PORT_OFF(temp); 2057 port_count = XHCI_EXT_PORT_COUNT(temp); 2058 xhci_dbg(xhci, "Ext Cap %p, port offset = %u, " 2059 "count = %u, revision = 0x%x\n", 2060 addr, port_offset, port_count, major_revision); 2061 /* Port count includes the current port offset */ 2062 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) 2063 /* WTF? "Valid values are ‘1’ to MaxPorts" */ 2064 return; 2065 2066 /* Check the host's USB2 LPM capability */ 2067 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) && 2068 (temp & XHCI_L1C)) { 2069 xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n"); 2070 xhci->sw_lpm_support = 1; 2071 } 2072 2073 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) { 2074 xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n"); 2075 xhci->sw_lpm_support = 1; 2076 if (temp & XHCI_HLC) { 2077 xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n"); 2078 xhci->hw_lpm_support = 1; 2079 } 2080 } 2081 2082 port_offset--; 2083 for (i = port_offset; i < (port_offset + port_count); i++) { 2084 /* Duplicate entry. Ignore the port if the revisions differ. */ 2085 if (xhci->port_array[i] != 0) { 2086 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," 2087 " port %u\n", addr, i); 2088 xhci_warn(xhci, "Port was marked as USB %u, " 2089 "duplicated as USB %u\n", 2090 xhci->port_array[i], major_revision); 2091 /* Only adjust the roothub port counts if we haven't 2092 * found a similar duplicate. 2093 */ 2094 if (xhci->port_array[i] != major_revision && 2095 xhci->port_array[i] != DUPLICATE_ENTRY) { 2096 if (xhci->port_array[i] == 0x03) 2097 xhci->num_usb3_ports--; 2098 else 2099 xhci->num_usb2_ports--; 2100 xhci->port_array[i] = DUPLICATE_ENTRY; 2101 } 2102 /* FIXME: Should we disable the port? */ 2103 continue; 2104 } 2105 xhci->port_array[i] = major_revision; 2106 if (major_revision == 0x03) 2107 xhci->num_usb3_ports++; 2108 else 2109 xhci->num_usb2_ports++; 2110 } 2111 /* FIXME: Should we disable ports not in the Extended Capabilities? */ 2112 } 2113 2114 /* 2115 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that 2116 * specify what speeds each port is supposed to be. We can't count on the port 2117 * speed bits in the PORTSC register being correct until a device is connected, 2118 * but we need to set up the two fake roothubs with the correct number of USB 2119 * 3.0 and USB 2.0 ports at host controller initialization time. 2120 */ 2121 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) 2122 { 2123 __le32 __iomem *addr; 2124 u32 offset; 2125 unsigned int num_ports; 2126 int i, j, port_index; 2127 2128 addr = &xhci->cap_regs->hcc_params; 2129 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); 2130 if (offset == 0) { 2131 xhci_err(xhci, "No Extended Capability registers, " 2132 "unable to set up roothub.\n"); 2133 return -ENODEV; 2134 } 2135 2136 num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 2137 xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); 2138 if (!xhci->port_array) 2139 return -ENOMEM; 2140 2141 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags); 2142 if (!xhci->rh_bw) 2143 return -ENOMEM; 2144 for (i = 0; i < num_ports; i++) { 2145 struct xhci_interval_bw_table *bw_table; 2146 2147 INIT_LIST_HEAD(&xhci->rh_bw[i].tts); 2148 bw_table = &xhci->rh_bw[i].bw_table; 2149 for (j = 0; j < XHCI_MAX_INTERVAL; j++) 2150 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); 2151 } 2152 2153 /* 2154 * For whatever reason, the first capability offset is from the 2155 * capability register base, not from the HCCPARAMS register. 2156 * See section 5.3.6 for offset calculation. 2157 */ 2158 addr = &xhci->cap_regs->hc_capbase + offset; 2159 while (1) { 2160 u32 cap_id; 2161 2162 cap_id = xhci_readl(xhci, addr); 2163 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) 2164 xhci_add_in_port(xhci, num_ports, addr, 2165 (u8) XHCI_EXT_PORT_MAJOR(cap_id)); 2166 offset = XHCI_EXT_CAPS_NEXT(cap_id); 2167 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) 2168 == num_ports) 2169 break; 2170 /* 2171 * Once you're into the Extended Capabilities, the offset is 2172 * always relative to the register holding the offset. 2173 */ 2174 addr += offset; 2175 } 2176 2177 if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { 2178 xhci_warn(xhci, "No ports on the roothubs?\n"); 2179 return -ENODEV; 2180 } 2181 xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", 2182 xhci->num_usb2_ports, xhci->num_usb3_ports); 2183 2184 /* Place limits on the number of roothub ports so that the hub 2185 * descriptors aren't longer than the USB core will allocate. 2186 */ 2187 if (xhci->num_usb3_ports > 15) { 2188 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n"); 2189 xhci->num_usb3_ports = 15; 2190 } 2191 if (xhci->num_usb2_ports > USB_MAXCHILDREN) { 2192 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n", 2193 USB_MAXCHILDREN); 2194 xhci->num_usb2_ports = USB_MAXCHILDREN; 2195 } 2196 2197 /* 2198 * Note we could have all USB 3.0 ports, or all USB 2.0 ports. 2199 * Not sure how the USB core will handle a hub with no ports... 2200 */ 2201 if (xhci->num_usb2_ports) { 2202 xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)* 2203 xhci->num_usb2_ports, flags); 2204 if (!xhci->usb2_ports) 2205 return -ENOMEM; 2206 2207 port_index = 0; 2208 for (i = 0; i < num_ports; i++) { 2209 if (xhci->port_array[i] == 0x03 || 2210 xhci->port_array[i] == 0 || 2211 xhci->port_array[i] == DUPLICATE_ENTRY) 2212 continue; 2213 2214 xhci->usb2_ports[port_index] = 2215 &xhci->op_regs->port_status_base + 2216 NUM_PORT_REGS*i; 2217 xhci_dbg(xhci, "USB 2.0 port at index %u, " 2218 "addr = %p\n", i, 2219 xhci->usb2_ports[port_index]); 2220 port_index++; 2221 if (port_index == xhci->num_usb2_ports) 2222 break; 2223 } 2224 } 2225 if (xhci->num_usb3_ports) { 2226 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)* 2227 xhci->num_usb3_ports, flags); 2228 if (!xhci->usb3_ports) 2229 return -ENOMEM; 2230 2231 port_index = 0; 2232 for (i = 0; i < num_ports; i++) 2233 if (xhci->port_array[i] == 0x03) { 2234 xhci->usb3_ports[port_index] = 2235 &xhci->op_regs->port_status_base + 2236 NUM_PORT_REGS*i; 2237 xhci_dbg(xhci, "USB 3.0 port at index %u, " 2238 "addr = %p\n", i, 2239 xhci->usb3_ports[port_index]); 2240 port_index++; 2241 if (port_index == xhci->num_usb3_ports) 2242 break; 2243 } 2244 } 2245 return 0; 2246 } 2247 2248 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 2249 { 2250 dma_addr_t dma; 2251 struct device *dev = xhci_to_hcd(xhci)->self.controller; 2252 unsigned int val, val2; 2253 u64 val_64; 2254 struct xhci_segment *seg; 2255 u32 page_size, temp; 2256 int i; 2257 2258 page_size = xhci_readl(xhci, &xhci->op_regs->page_size); 2259 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); 2260 for (i = 0; i < 16; i++) { 2261 if ((0x1 & page_size) != 0) 2262 break; 2263 page_size = page_size >> 1; 2264 } 2265 if (i < 16) 2266 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); 2267 else 2268 xhci_warn(xhci, "WARN: no supported page size\n"); 2269 /* Use 4K pages, since that's common and the minimum the HC supports */ 2270 xhci->page_shift = 12; 2271 xhci->page_size = 1 << xhci->page_shift; 2272 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); 2273 2274 /* 2275 * Program the Number of Device Slots Enabled field in the CONFIG 2276 * register with the max value of slots the HC can handle. 2277 */ 2278 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); 2279 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", 2280 (unsigned int) val); 2281 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); 2282 val |= (val2 & ~HCS_SLOTS_MASK); 2283 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", 2284 (unsigned int) val); 2285 xhci_writel(xhci, val, &xhci->op_regs->config_reg); 2286 2287 /* 2288 * Section 5.4.8 - doorbell array must be 2289 * "physically contiguous and 64-byte (cache line) aligned". 2290 */ 2291 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, 2292 GFP_KERNEL); 2293 if (!xhci->dcbaa) 2294 goto fail; 2295 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); 2296 xhci->dcbaa->dma = dma; 2297 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 2298 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 2299 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); 2300 2301 /* 2302 * Initialize the ring segment pool. The ring must be a contiguous 2303 * structure comprised of TRBs. The TRBs must be 16 byte aligned, 2304 * however, the command ring segment needs 64-byte aligned segments, 2305 * so we pick the greater alignment need. 2306 */ 2307 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 2308 TRB_SEGMENT_SIZE, 64, xhci->page_size); 2309 2310 /* See Table 46 and Note on Figure 55 */ 2311 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2312 2112, 64, xhci->page_size); 2313 if (!xhci->segment_pool || !xhci->device_pool) 2314 goto fail; 2315 2316 /* Linear stream context arrays don't have any boundary restrictions, 2317 * and only need to be 16-byte aligned. 2318 */ 2319 xhci->small_streams_pool = 2320 dma_pool_create("xHCI 256 byte stream ctx arrays", 2321 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); 2322 xhci->medium_streams_pool = 2323 dma_pool_create("xHCI 1KB stream ctx arrays", 2324 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); 2325 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE 2326 * will be allocated with dma_alloc_coherent() 2327 */ 2328 2329 if (!xhci->small_streams_pool || !xhci->medium_streams_pool) 2330 goto fail; 2331 2332 /* Set up the command ring to have one segments for now. */ 2333 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); 2334 if (!xhci->cmd_ring) 2335 goto fail; 2336 INIT_LIST_HEAD(&xhci->cancel_cmd_list); 2337 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); 2338 xhci_dbg(xhci, "First segment DMA is 0x%llx\n", 2339 (unsigned long long)xhci->cmd_ring->first_seg->dma); 2340 2341 /* Set the address in the Command Ring Control register */ 2342 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 2343 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 2344 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 2345 xhci->cmd_ring->cycle_state; 2346 xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); 2347 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 2348 xhci_dbg_cmd_ptrs(xhci); 2349 2350 xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); 2351 if (!xhci->lpm_command) 2352 goto fail; 2353 2354 /* Reserve one command ring TRB for disabling LPM. 2355 * Since the USB core grabs the shared usb_bus bandwidth mutex before 2356 * disabling LPM, we only need to reserve one TRB for all devices. 2357 */ 2358 xhci->cmd_ring_reserved_trbs++; 2359 2360 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 2361 val &= DBOFF_MASK; 2362 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 2363 " from cap regs base addr\n", val); 2364 xhci->dba = (void __iomem *) xhci->cap_regs + val; 2365 xhci_dbg_regs(xhci); 2366 xhci_print_run_regs(xhci); 2367 /* Set ir_set to interrupt register set 0 */ 2368 xhci->ir_set = &xhci->run_regs->ir_set[0]; 2369 2370 /* 2371 * Event ring setup: Allocate a normal ring, but also setup 2372 * the event ring segment table (ERST). Section 4.9.3. 2373 */ 2374 xhci_dbg(xhci, "// Allocating event ring\n"); 2375 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 2376 flags); 2377 if (!xhci->event_ring) 2378 goto fail; 2379 if (xhci_check_trb_in_td_math(xhci, flags) < 0) 2380 goto fail; 2381 2382 xhci->erst.entries = dma_alloc_coherent(dev, 2383 sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, 2384 GFP_KERNEL); 2385 if (!xhci->erst.entries) 2386 goto fail; 2387 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", 2388 (unsigned long long)dma); 2389 2390 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); 2391 xhci->erst.num_entries = ERST_NUM_SEGS; 2392 xhci->erst.erst_dma_addr = dma; 2393 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", 2394 xhci->erst.num_entries, 2395 xhci->erst.entries, 2396 (unsigned long long)xhci->erst.erst_dma_addr); 2397 2398 /* set ring base address and size for each segment table entry */ 2399 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 2400 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 2401 entry->seg_addr = cpu_to_le64(seg->dma); 2402 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); 2403 entry->rsvd = 0; 2404 seg = seg->next; 2405 } 2406 2407 /* set ERST count with the number of entries in the segment table */ 2408 val = xhci_readl(xhci, &xhci->ir_set->erst_size); 2409 val &= ERST_SIZE_MASK; 2410 val |= ERST_NUM_SEGS; 2411 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", 2412 val); 2413 xhci_writel(xhci, val, &xhci->ir_set->erst_size); 2414 2415 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); 2416 /* set the segment table base address */ 2417 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 2418 (unsigned long long)xhci->erst.erst_dma_addr); 2419 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); 2420 val_64 &= ERST_PTR_MASK; 2421 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); 2422 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); 2423 2424 /* Set the event ring dequeue address */ 2425 xhci_set_hc_event_deq(xhci); 2426 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 2427 xhci_print_ir_set(xhci, 0); 2428 2429 /* 2430 * XXX: Might need to set the Interrupter Moderation Register to 2431 * something other than the default (~1ms minimum between interrupts). 2432 * See section 5.5.1.2. 2433 */ 2434 init_completion(&xhci->addr_dev); 2435 for (i = 0; i < MAX_HC_SLOTS; ++i) 2436 xhci->devs[i] = NULL; 2437 for (i = 0; i < USB_MAXCHILDREN; ++i) { 2438 xhci->bus_state[0].resume_done[i] = 0; 2439 xhci->bus_state[1].resume_done[i] = 0; 2440 } 2441 2442 if (scratchpad_alloc(xhci, flags)) 2443 goto fail; 2444 if (xhci_setup_port_arrays(xhci, flags)) 2445 goto fail; 2446 2447 INIT_LIST_HEAD(&xhci->lpm_failed_devs); 2448 2449 /* Enable USB 3.0 device notifications for function remote wake, which 2450 * is necessary for allowing USB 3.0 devices to do remote wakeup from 2451 * U3 (device suspend). 2452 */ 2453 temp = xhci_readl(xhci, &xhci->op_regs->dev_notification); 2454 temp &= ~DEV_NOTE_MASK; 2455 temp |= DEV_NOTE_FWAKE; 2456 xhci_writel(xhci, temp, &xhci->op_regs->dev_notification); 2457 2458 return 0; 2459 2460 fail: 2461 xhci_warn(xhci, "Couldn't initialize memory\n"); 2462 xhci_halt(xhci); 2463 xhci_reset(xhci); 2464 xhci_mem_cleanup(xhci); 2465 return -ENOMEM; 2466 } 2467