/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        struct xhci_segment *seg;
        dma_addr_t dma;

        seg = kzalloc(sizeof *seg, flags);
        if (!seg)
                return NULL;
        xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

        seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
        if (!seg->trbs) {
                kfree(seg);
                return NULL;
        }
        xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
                        seg->trbs, (unsigned long long)dma);

        memset(seg->trbs, 0, SEGMENT_SIZE);
        seg->dma = dma;
        seg->next = NULL;

        return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
        if (!seg)
                return;
        if (seg->trbs) {
                xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
                                seg->trbs, (unsigned long long)seg->dma);
                dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
                seg->trbs = NULL;
        }
        xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
        kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                struct xhci_segment *next, bool link_trbs)
{
        u32 val;

        if (!prev || !next)
                return;
        prev->next = next;
        if (link_trbs) {
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
                val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
                val &= ~TRB_TYPE_BITMASK;
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
                if (xhci_link_trb_quirk(xhci))
                        val |= TRB_CHAIN;
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
        }
        xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
                        (unsigned long long)prev->dma,
                        (unsigned long long)next->dma);
}

/* XXX: Do we need the hcd structure in all these functions? */
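/*
 * Free all segments of a ring, then the ring structure itself.
 * The segments are linked into a circle, so walk forward from the segment
 * after first_seg until we wrap back around to it, and free first_seg last.
 */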
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_segment *seg;
        struct xhci_segment *first_seg;

        if (!ring || !ring->first_seg)
                return;
        first_seg = ring->first_seg;
        seg = first_seg->next;
        xhci_dbg(xhci, "Freeing ring at %p\n", ring);
        while (seg != first_seg) {
                struct xhci_segment *next = seg->next;
                xhci_segment_free(xhci, seg);
                seg = next;
        }
        xhci_segment_free(xhci, first_seg);
        ring->first_seg = NULL;
        kfree(ring);
}

/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                unsigned int num_segs, bool link_trbs, gfp_t flags)
{
        struct xhci_ring *ring;
        struct xhci_segment *prev;

        ring = kzalloc(sizeof *(ring), flags);
        xhci_dbg(xhci, "Allocating ring at %p\n", ring);
        if (!ring)
                return NULL;

        INIT_LIST_HEAD(&ring->td_list);
        if (num_segs == 0)
                return ring;

        ring->first_seg = xhci_segment_alloc(xhci, flags);
        if (!ring->first_seg)
                goto fail;
        num_segs--;

        prev = ring->first_seg;
        while (num_segs > 0) {
                struct xhci_segment *next;

                next = xhci_segment_alloc(xhci, flags);
                if (!next)
                        goto fail;
                xhci_link_segments(xhci, prev, next, link_trbs);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

        if (link_trbs) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
                xhci_dbg(xhci, "Wrote link toggle flag to"
                                " segment %p (virtual), 0x%llx (DMA)\n",
                                prev, (unsigned long long)prev->dma);
        }
        /* The ring is empty, so the enqueue pointer == dequeue pointer */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;
        /* The ring is initialized to 0. The producer must write 1 to the cycle
         * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
         * compare CCS to the cycle bit to check ownership, so CCS = 1.
         */
        ring->cycle_state = 1;

        return ring;

fail:
        xhci_ring_free(xhci, ring);
        return NULL;
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
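/*
 * Allocate a device or input context from the device context pool.
 * The controller advertises 32-byte or 64-byte context entries via
 * HCC_64BYTE_CONTEXT; an input context is one entry larger than an output
 * (device) context because of the leading input control context.
 */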
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
                int type, gfp_t flags)
{
        struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
        if (!ctx)
                return NULL;

        BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
        ctx->type = type;
        ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
        if (type == XHCI_CTX_TYPE_INPUT)
                ctx->size += CTX_SIZE(xhci->hcc_params);

        ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
        memset(ctx->bytes, 0, ctx->size);
        return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx)
{
        dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
        kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx)
{
        BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
        return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx)
{
        if (ctx->type == XHCI_CTX_TYPE_DEVICE)
                return (struct xhci_slot_ctx *)ctx->bytes;

        return (struct xhci_slot_ctx *)
                (ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx,
                unsigned int ep_index)
{
        /* increment ep index by offset of start of ep ctx array */
        ep_index++;
        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                ep_index++;

        return (struct xhci_ep_ctx *)
                (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}

/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || !xhci->devs[slot_id])
                return;

        dev = xhci->devs[slot_id];
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
        if (!dev)
                return;

        for (i = 0; i < 31; ++i)
                if (dev->eps[i].ring)
                        xhci_ring_free(xhci, dev->eps[i].ring);

        if (dev->in_ctx)
                xhci_free_container_ctx(xhci, dev->in_ctx);
        if (dev->out_ctx)
                xhci_free_container_ctx(xhci, dev->out_ctx);

        kfree(xhci->devs[slot_id]);
        xhci->devs[slot_id] = NULL;
}
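/*
 * Allocate the driver-side state for a newly enabled device slot: the output
 * and input device contexts, the endpoint 0 ring, and the DCBAA entry that
 * points at the output context.  Returns 1 on success and 0 on failure.
 */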
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || xhci->devs[slot_id]) {
                xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
                return 0;
        }

        xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
        if (!xhci->devs[slot_id])
                return 0;
        dev = xhci->devs[slot_id];

        /* Allocate the (output) device context that will be used in the HC. */
        dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dev->out_ctx)
                goto fail;

        xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->out_ctx->dma);

        /* Allocate the (input) device context for address device command */
        dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
        if (!dev->in_ctx)
                goto fail;

        xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->in_ctx->dma);

        /* Initialize the cancellation list for each endpoint */
        for (i = 0; i < 31; i++)
                INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);

        /* Allocate endpoint 0 ring */
        dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
        if (!dev->eps[0].ring)
                goto fail;

        init_completion(&dev->cmd_completion);
        INIT_LIST_HEAD(&dev->cmd_list);

        /* Point to output device context in dcbaa. */
        xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
        xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
                        slot_id,
                        &xhci->dcbaa->dev_context_ptrs[slot_id],
                        (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

        return 1;
fail:
        xhci_free_virt_device(xhci, slot_id);
        return 0;
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx *ep0_ctx;
        struct usb_device *top_dev;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;

        dev = xhci->devs[udev->slot_id];
        /* Slot ID 0 is reserved */
        if (udev->slot_id == 0 || !dev) {
                xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
                                udev->slot_id);
                return -EINVAL;
        }
        ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
        ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

        /* 2) New slot context and endpoint 0 context are valid */
        ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

        /* 3) Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= LAST_CTX(1);

        slot_ctx->dev_info |= (u32) udev->route;
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
                break;
        case USB_SPEED_HIGH:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
                break;
        case USB_SPEED_FULL:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
                break;
        case USB_SPEED_LOW:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
                break;
        default:
                /* Speed was set earlier, this shouldn't happen. */
                BUG();
        }
        /* Find the root hub port this device is under */
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
        slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
        xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
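        /*
         * A low- or full-speed device below a high-speed hub needs a
         * transaction translator: record the hub's slot ID and the hub port
         * the device hangs off of, and set MTT if the hub has one TT per port.
         */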
        /* Is this a LS/FS device under a HS hub? */
        if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
                        udev->tt) {
                slot_ctx->tt_info = udev->tt->hub->slot_id;
                slot_ctx->tt_info |= udev->ttport << 8;
                if (udev->tt->multi)
                        slot_ctx->dev_info |= DEV_MTT;
        }
        xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
        xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

        /* Step 4 - ring already allocated */
        /* Step 5 */
        ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
        /*
         * XXX: Not sure about wireless USB devices.
         */
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                ep0_ctx->ep_info2 |= MAX_PACKET(512);
                break;
        case USB_SPEED_HIGH:
        /* USB core guesses at a 64-byte max packet first for FS devices */
        case USB_SPEED_FULL:
                ep0_ctx->ep_info2 |= MAX_PACKET(64);
                break;
        case USB_SPEED_LOW:
                ep0_ctx->ep_info2 |= MAX_PACKET(8);
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
                break;
        default:
                /* New speed? */
                BUG();
        }
        /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
        ep0_ctx->ep_info2 |= MAX_BURST(0);
        ep0_ctx->ep_info2 |= ERROR_COUNT(3);

        ep0_ctx->deq = dev->eps[0].ring->first_seg->dma;
        ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */

        return 0;
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        unsigned int interval = 0;

        switch (udev->speed) {
        case USB_SPEED_HIGH:
                /* Max NAK rate */
                if (usb_endpoint_xfer_control(&ep->desc) ||
                                usb_endpoint_xfer_bulk(&ep->desc))
                        interval = ep->desc.bInterval;
                /* Fall through - SS and HS isoc/int have same decoding */
        case USB_SPEED_SUPER:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        if (ep->desc.bInterval == 0)
                                interval = 0;
                        else
                                interval = ep->desc.bInterval - 1;
                        if (interval > 15)
                                interval = 15;
                        if (interval != ep->desc.bInterval - 1)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        /* Convert bInterval (in 1-255 frames) to microframes and round down to
         * nearest power of 2.
         */
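        /*
         * Example: an interrupt endpoint with bInterval = 3 frames is
         * 24 microframes; fls(24) - 1 = 4, so the endpoint is serviced every
         * 2^4 = 16 microframes and the rounding is reported below.
         */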
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        interval = fls(8*ep->desc.bInterval) - 1;
                        if (interval > 10)
                                interval = 10;
                        if (interval < 3)
                                interval = 3;
                        if ((1 << interval) != 8*ep->desc.bInterval)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        default:
                BUG();
        }
        return EP_INTERVAL(interval);
}

static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        int in;
        u32 type;

        in = usb_endpoint_dir_in(&ep->desc);
        if (usb_endpoint_xfer_control(&ep->desc)) {
                type = EP_TYPE(CTRL_EP);
        } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
                if (in)
                        type = EP_TYPE(BULK_IN_EP);
                else
                        type = EP_TYPE(BULK_OUT_EP);
        } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
                if (in)
                        type = EP_TYPE(ISOC_IN_EP);
                else
                        type = EP_TYPE(ISOC_OUT_EP);
        } else if (usb_endpoint_xfer_int(&ep->desc)) {
                if (in)
                        type = EP_TYPE(INT_IN_EP);
                else
                        type = EP_TYPE(INT_OUT_EP);
        } else {
                BUG();
        }
        return type;
}

int xhci_endpoint_init(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_device *udev,
                struct usb_host_endpoint *ep,
                gfp_t mem_flags)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_ring *ep_ring;
        unsigned int max_packet;
        unsigned int max_burst;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        /* Set up the endpoint ring */
        virt_dev->eps[ep_index].new_ring =
                xhci_ring_alloc(xhci, 1, true, mem_flags);
        if (!virt_dev->eps[ep_index].new_ring)
                return -ENOMEM;
        ep_ring = virt_dev->eps[ep_index].new_ring;
        ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

        ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

        /* FIXME dig Mult and streams info out of ep companion desc */

        /* Allow 3 retries for everything but isoc;
         * error count = 0 means infinite retries.
         */
        if (!usb_endpoint_xfer_isoc(&ep->desc))
                ep_ctx->ep_info2 = ERROR_COUNT(3);
        else
                ep_ctx->ep_info2 = ERROR_COUNT(1);

        ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

        /* Set the max packet size and max burst */
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                max_packet = ep->desc.wMaxPacketSize;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                /* dig out max burst from ep companion desc */
                if (!ep->ss_ep_comp) {
                        xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
                        max_packet = 0;
                } else {
                        max_packet = ep->ss_ep_comp->desc.bMaxBurst;
                }
                ep_ctx->ep_info2 |= MAX_BURST(max_packet);
                break;
        case USB_SPEED_HIGH:
                /* bits 11:12 specify the number of additional transaction
                 * opportunities per microframe (USB 2.0, section 9.6.6)
                 */
                if (usb_endpoint_xfer_isoc(&ep->desc) ||
                                usb_endpoint_xfer_int(&ep->desc)) {
                        max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
                        ep_ctx->ep_info2 |= MAX_BURST(max_burst);
                }
                /* Fall through */
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                max_packet = ep->desc.wMaxPacketSize & 0x3ff;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                break;
        default:
                BUG();
        }
        /* FIXME Debug endpoint context */
        return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_host_endpoint *ep)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = 0;
        ep_ctx->deq = 0;
        ep_ctx->tx_info = 0;
        /* Don't free the endpoint ring until the set interface or configuration
         * request succeeds.
         */
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx,
                unsigned int ep_index)
{
        struct xhci_ep_ctx *out_ep_ctx;
        struct xhci_ep_ctx *in_ep_ctx;

        out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

        in_ep_ctx->ep_info = out_ep_ctx->ep_info;
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx)
{
        struct xhci_slot_ctx *in_slot_ctx;
        struct xhci_slot_ctx *out_slot_ctx;

        in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

        in_slot_ctx->dev_info = out_slot_ctx->dev_info;
        in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
        in_slot_ctx->tt_info = out_slot_ctx->tt_info;
        in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
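/*
 * The xHC reports how many scratchpad pages it needs via HCS_MAX_SCRATCHPAD
 * in HCSPARAMS2.  Entry 0 of the DCBAA points to an array of DMA addresses,
 * one per scratchpad page; the controller owns those pages and the driver
 * only allocates and frees them.
 */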
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        int i;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

        if (!num_sp)
                return 0;

        xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
        if (!xhci->scratchpad)
                goto fail_sp;

        xhci->scratchpad->sp_array =
                pci_alloc_consistent(to_pci_dev(dev),
                                num_sp * sizeof(u64),
                                &xhci->scratchpad->sp_dma);
        if (!xhci->scratchpad->sp_array)
                goto fail_sp2;

        xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
        if (!xhci->scratchpad->sp_buffers)
                goto fail_sp3;

        xhci->scratchpad->sp_dma_buffers =
                kzalloc(sizeof(dma_addr_t) * num_sp, flags);
        if (!xhci->scratchpad->sp_dma_buffers)
                goto fail_sp4;

        xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
        for (i = 0; i < num_sp; i++) {
                dma_addr_t dma;
                void *buf = pci_alloc_consistent(to_pci_dev(dev),
                                xhci->page_size, &dma);
                if (!buf)
                        goto fail_sp5;

                xhci->scratchpad->sp_array[i] = dma;
                xhci->scratchpad->sp_buffers[i] = buf;
                xhci->scratchpad->sp_dma_buffers[i] = dma;
        }

        return 0;

fail_sp5:
        for (i = i - 1; i >= 0; i--) {
                pci_free_consistent(to_pci_dev(dev), xhci->page_size,
                                xhci->scratchpad->sp_buffers[i],
                                xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);

fail_sp4:
        kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
        pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
                        xhci->scratchpad->sp_array,
                        xhci->scratchpad->sp_dma);

fail_sp2:
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;

fail_sp:
        return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
        int num_sp;
        int i;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (!xhci->scratchpad)
                return;

        num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        for (i = 0; i < num_sp; i++) {
                pci_free_consistent(pdev, xhci->page_size,
                                xhci->scratchpad->sp_buffers[i],
                                xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);
        kfree(xhci->scratchpad->sp_buffers);
        pci_free_consistent(pdev, num_sp * sizeof(u64),
                        xhci->scratchpad->sp_array,
                        xhci->scratchpad->sp_dma);
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
                bool allocate_completion, gfp_t mem_flags)
{
        struct xhci_command *command;

        command = kzalloc(sizeof(*command), mem_flags);
        if (!command)
                return NULL;

        command->in_ctx =
                xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
        if (!command->in_ctx) {
                kfree(command);
                return NULL;
        }

        if (allocate_completion) {
                command->completion =
                        kzalloc(sizeof(struct completion), mem_flags);
                if (!command->completion) {
                        xhci_free_container_ctx(xhci, command->in_ctx);
                        kfree(command);
                        return NULL;
                }
                init_completion(command->completion);
        }

        command->status = 0;
        INIT_LIST_HEAD(&command->cmd_list);
        return command;
}

void xhci_free_command(struct xhci_hcd *xhci,
                struct xhci_command *command)
{
        xhci_free_container_ctx(xhci, command->in_ctx);
        kfree(command->completion);
        kfree(command);
}
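/*
 * Undo everything xhci_mem_init() set up: the ERST and event ring, the
 * command ring, per-slot virtual device state, the segment and context DMA
 * pools, the DCBAA, and the scratchpad buffers.  Each free is guarded by a
 * NULL check, so this is also used on the xhci_mem_init() failure path.
 */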
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int size;
        int i;

        /* Free the Event Ring Segment Table and the actual Event Ring */
        xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                pci_free_consistent(pdev, size,
                                xhci->erst.entries, xhci->erst.erst_dma_addr);
        xhci->erst.entries = NULL;
        xhci_dbg(xhci, "Freed ERST\n");
        if (xhci->event_ring)
                xhci_ring_free(xhci, xhci->event_ring);
        xhci->event_ring = NULL;
        xhci_dbg(xhci, "Freed event ring\n");

        xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
        xhci_dbg(xhci, "Freed command ring\n");

        for (i = 1; i < MAX_HC_SLOTS; ++i)
                xhci_free_virt_device(xhci, i);

        if (xhci->segment_pool)
                dma_pool_destroy(xhci->segment_pool);
        xhci->segment_pool = NULL;
        xhci_dbg(xhci, "Freed segment pool\n");

        if (xhci->device_pool)
                dma_pool_destroy(xhci->device_pool);
        xhci->device_pool = NULL;
        xhci_dbg(xhci, "Freed device context pool\n");

        xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
        if (xhci->dcbaa)
                pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;

        xhci->page_size = 0;
        xhci->page_shift = 0;
        scratchpad_free(xhci);
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        dma_addr_t dma;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        unsigned int val, val2;
        u64 val_64;
        struct xhci_segment *seg;
        u32 page_size;
        int i;

        page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
        xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
        for (i = 0; i < 16; i++) {
                if ((0x1 & page_size) != 0)
                        break;
                page_size = page_size >> 1;
        }
        if (i < 16)
                xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
        else
                xhci_warn(xhci, "WARN: no supported page size\n");
        /* Use 4K pages, since that's common and the minimum the HC supports */
        xhci->page_shift = 12;
        xhci->page_size = 1 << xhci->page_shift;
        xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

        /*
         * Program the Number of Device Slots Enabled field in the CONFIG
         * register with the max value of slots the HC can handle.
         */
        val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
        xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
                        (unsigned int) val);
        val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
        val |= (val2 & ~HCS_SLOTS_MASK);
        xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
                        (unsigned int) val);
        xhci_writel(xhci, val, &xhci->op_regs->config_reg);

        /*
         * Section 5.4.8 - doorbell array must be
         * "physically contiguous and 64-byte (cache line) aligned".
         */
        xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(*xhci->dcbaa), &dma);
        if (!xhci->dcbaa)
                goto fail;
        memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
        xhci->dcbaa->dma = dma;
        xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
                        (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
        xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

        /*
         * Initialize the ring segment pool.  The ring must be a contiguous
         * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
         * however, the command ring segment needs 64-byte aligned segments,
         * so we pick the greater alignment need.
         */
        xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
                        SEGMENT_SIZE, 64, xhci->page_size);

        /* See Table 46 and Note on Figure 55 */
        xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
                        2112, 64, xhci->page_size);
        if (!xhci->segment_pool || !xhci->device_pool)
                goto fail;

        /* Set up the command ring to have one segment for now. */
        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
        if (!xhci->cmd_ring)
                goto fail;
        xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
        xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
                        (unsigned long long)xhci->cmd_ring->first_seg->dma);

        /* Set the address in the Command Ring Control register */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (unsigned long long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        val = xhci_readl(xhci, &xhci->cap_regs->db_off);
        val &= DBOFF_MASK;
        xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                        " from cap regs base addr\n", val);
        xhci->dba = (void *) xhci->cap_regs + val;
        xhci_dbg_regs(xhci);
        xhci_print_run_regs(xhci);
        /* Set ir_set to interrupt register set 0 */
        xhci->ir_set = (void *) xhci->run_regs->ir_set;

        /*
         * Event ring setup: Allocate a normal ring, but also setup
         * the event ring segment table (ERST).  Section 4.9.3.
         */
        xhci_dbg(xhci, "// Allocating event ring\n");
        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
        if (!xhci->event_ring)
                goto fail;

        xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
        if (!xhci->erst.entries)
                goto fail;
        xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
                        (unsigned long long)dma);

        memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
        xhci->erst.num_entries = ERST_NUM_SEGS;
        xhci->erst.erst_dma_addr = dma;
        xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
                        xhci->erst.num_entries,
                        xhci->erst.entries,
                        (unsigned long long)xhci->erst.erst_dma_addr);

        /* set ring base address and size for each segment table entry */
        for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
                struct xhci_erst_entry *entry = &xhci->erst.entries[val];
                entry->seg_addr = seg->dma;
                entry->seg_size = TRBS_PER_SEGMENT;
                entry->rsvd = 0;
                seg = seg->next;
        }

        /* set ERST count with the number of entries in the segment table */
        val = xhci_readl(xhci, &xhci->ir_set->erst_size);
        val &= ERST_SIZE_MASK;
        val |= ERST_NUM_SEGS;
        xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
                        val);
        xhci_writel(xhci, val, &xhci->ir_set->erst_size);

        xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
        /* set the segment table base address */
        xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
                        (unsigned long long)xhci->erst.erst_dma_addr);
        val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        val_64 &= ERST_PTR_MASK;
        val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
        xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

        /* Set the event ring dequeue address */
        xhci_set_hc_event_deq(xhci);
        xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        /*
         * XXX: Might need to set the Interrupter Moderation Register to
         * something other than the default (~1ms minimum between interrupts).
         * See section 5.5.1.2.
         */
        init_completion(&xhci->addr_dev);
        for (i = 0; i < MAX_HC_SLOTS; ++i)
                xhci->devs[i] = NULL;

        if (scratchpad_alloc(xhci, flags))
                goto fail;

        return 0;

fail:
        xhci_warn(xhci, "Couldn't initialize memory\n");
        xhci_mem_cleanup(xhci);
        return -ENOMEM;
}