1 /* 2 * xHCI host controller driver 3 * 4 * Copyright (C) 2008 Intel Corp. 5 * 6 * Author: Sarah Sharp 7 * Some code borrowed from the Linux EHCI driver. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16 * for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software Foundation, 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/usb.h> 24 #include <linux/pci.h> 25 #include <linux/dmapool.h> 26 27 #include "xhci.h" 28 29 /* 30 * Allocates a generic ring segment from the ring pool, sets the dma address, 31 * initializes the segment to zero, and sets the private next pointer to NULL. 32 * 33 * Section 4.11.1.1: 34 * "All components of all Command and Transfer TRBs shall be initialized to '0'" 35 */ 36 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) 37 { 38 struct xhci_segment *seg; 39 dma_addr_t dma; 40 41 seg = kzalloc(sizeof *seg, flags); 42 if (!seg) 43 return 0; 44 xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg); 45 46 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); 47 if (!seg->trbs) { 48 kfree(seg); 49 return 0; 50 } 51 xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n", 52 seg->trbs, (unsigned long long)dma); 53 54 memset(seg->trbs, 0, SEGMENT_SIZE); 55 seg->dma = dma; 56 seg->next = NULL; 57 58 return seg; 59 } 60 61 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) 62 { 63 if (!seg) 64 return; 65 if (seg->trbs) { 66 xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n", 67 seg->trbs, (unsigned long long)seg->dma); 68 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); 69 seg->trbs = NULL; 70 } 71 xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg); 72 kfree(seg); 73 } 74 75 /* 76 * Make the prev segment point to the next segment. 77 * 78 * Change the last TRB in the prev segment to be a Link TRB which points to the 79 * DMA address of the next segment. The caller needs to set any Link TRB 80 * related flags, such as End TRB, Toggle Cycle, and no snoop. 81 */ 82 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, 83 struct xhci_segment *next, bool link_trbs) 84 { 85 u32 val; 86 87 if (!prev || !next) 88 return; 89 prev->next = next; 90 if (link_trbs) { 91 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; 92 93 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ 94 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; 95 val &= ~TRB_TYPE_BITMASK; 96 val |= TRB_TYPE(TRB_LINK); 97 prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; 98 } 99 xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", 100 (unsigned long long)prev->dma, 101 (unsigned long long)next->dma); 102 } 103 104 /* XXX: Do we need the hcd structure in all these functions? */ 105 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) 106 { 107 struct xhci_segment *seg; 108 struct xhci_segment *first_seg; 109 110 if (!ring || !ring->first_seg) 111 return; 112 first_seg = ring->first_seg; 113 seg = first_seg->next; 114 xhci_dbg(xhci, "Freeing ring at %p\n", ring); 115 while (seg != first_seg) { 116 struct xhci_segment *next = seg->next; 117 xhci_segment_free(xhci, seg); 118 seg = next; 119 } 120 xhci_segment_free(xhci, first_seg); 121 ring->first_seg = NULL; 122 kfree(ring); 123 } 124 125 /** 126 * Create a new ring with zero or more segments. 127 * 128 * Link each segment together into a ring. 129 * Set the end flag and the cycle toggle bit on the last segment. 130 * See section 4.9.1 and figures 15 and 16. 131 */ 132 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, 133 unsigned int num_segs, bool link_trbs, gfp_t flags) 134 { 135 struct xhci_ring *ring; 136 struct xhci_segment *prev; 137 138 ring = kzalloc(sizeof *(ring), flags); 139 xhci_dbg(xhci, "Allocating ring at %p\n", ring); 140 if (!ring) 141 return 0; 142 143 INIT_LIST_HEAD(&ring->td_list); 144 INIT_LIST_HEAD(&ring->cancelled_td_list); 145 if (num_segs == 0) 146 return ring; 147 148 ring->first_seg = xhci_segment_alloc(xhci, flags); 149 if (!ring->first_seg) 150 goto fail; 151 num_segs--; 152 153 prev = ring->first_seg; 154 while (num_segs > 0) { 155 struct xhci_segment *next; 156 157 next = xhci_segment_alloc(xhci, flags); 158 if (!next) 159 goto fail; 160 xhci_link_segments(xhci, prev, next, link_trbs); 161 162 prev = next; 163 num_segs--; 164 } 165 xhci_link_segments(xhci, prev, ring->first_seg, link_trbs); 166 167 if (link_trbs) { 168 /* See section 4.9.2.1 and 6.4.4.1 */ 169 prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); 170 xhci_dbg(xhci, "Wrote link toggle flag to" 171 " segment %p (virtual), 0x%llx (DMA)\n", 172 prev, (unsigned long long)prev->dma); 173 } 174 /* The ring is empty, so the enqueue pointer == dequeue pointer */ 175 ring->enqueue = ring->first_seg->trbs; 176 ring->enq_seg = ring->first_seg; 177 ring->dequeue = ring->enqueue; 178 ring->deq_seg = ring->first_seg; 179 /* The ring is initialized to 0. The producer must write 1 to the cycle 180 * bit to handover ownership of the TRB, so PCS = 1. The consumer must 181 * compare CCS to the cycle bit to check ownership, so CCS = 1. 182 */ 183 ring->cycle_state = 1; 184 185 return ring; 186 187 fail: 188 xhci_ring_free(xhci, ring); 189 return 0; 190 } 191 192 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 193 194 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 195 int type, gfp_t flags) 196 { 197 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); 198 if (!ctx) 199 return NULL; 200 201 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); 202 ctx->type = type; 203 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; 204 if (type == XHCI_CTX_TYPE_INPUT) 205 ctx->size += CTX_SIZE(xhci->hcc_params); 206 207 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); 208 memset(ctx->bytes, 0, ctx->size); 209 return ctx; 210 } 211 212 void xhci_free_container_ctx(struct xhci_hcd *xhci, 213 struct xhci_container_ctx *ctx) 214 { 215 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); 216 kfree(ctx); 217 } 218 219 struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, 220 struct xhci_container_ctx *ctx) 221 { 222 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); 223 return (struct xhci_input_control_ctx *)ctx->bytes; 224 } 225 226 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, 227 struct xhci_container_ctx *ctx) 228 { 229 if (ctx->type == XHCI_CTX_TYPE_DEVICE) 230 return (struct xhci_slot_ctx *)ctx->bytes; 231 232 return (struct xhci_slot_ctx *) 233 (ctx->bytes + CTX_SIZE(xhci->hcc_params)); 234 } 235 236 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, 237 struct xhci_container_ctx *ctx, 238 unsigned int ep_index) 239 { 240 /* increment ep index by offset of start of ep ctx array */ 241 ep_index++; 242 if (ctx->type == XHCI_CTX_TYPE_INPUT) 243 ep_index++; 244 245 return (struct xhci_ep_ctx *) 246 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 247 } 248 249 /* All the xhci_tds in the ring's TD list should be freed at this point */ 250 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 251 { 252 struct xhci_virt_device *dev; 253 int i; 254 255 /* Slot ID 0 is reserved */ 256 if (slot_id == 0 || !xhci->devs[slot_id]) 257 return; 258 259 dev = xhci->devs[slot_id]; 260 xhci->dcbaa->dev_context_ptrs[slot_id] = 0; 261 if (!dev) 262 return; 263 264 for (i = 0; i < 31; ++i) 265 if (dev->ep_rings[i]) 266 xhci_ring_free(xhci, dev->ep_rings[i]); 267 268 if (dev->in_ctx) 269 xhci_free_container_ctx(xhci, dev->in_ctx); 270 if (dev->out_ctx) 271 xhci_free_container_ctx(xhci, dev->out_ctx); 272 273 kfree(xhci->devs[slot_id]); 274 xhci->devs[slot_id] = 0; 275 } 276 277 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 278 struct usb_device *udev, gfp_t flags) 279 { 280 struct xhci_virt_device *dev; 281 282 /* Slot ID 0 is reserved */ 283 if (slot_id == 0 || xhci->devs[slot_id]) { 284 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); 285 return 0; 286 } 287 288 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 289 if (!xhci->devs[slot_id]) 290 return 0; 291 dev = xhci->devs[slot_id]; 292 293 /* Allocate the (output) device context that will be used in the HC. */ 294 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 295 if (!dev->out_ctx) 296 goto fail; 297 298 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, 299 (unsigned long long)dev->out_ctx->dma); 300 301 /* Allocate the (input) device context for address device command */ 302 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); 303 if (!dev->in_ctx) 304 goto fail; 305 306 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 307 (unsigned long long)dev->in_ctx->dma); 308 309 /* Allocate endpoint 0 ring */ 310 dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); 311 if (!dev->ep_rings[0]) 312 goto fail; 313 314 init_completion(&dev->cmd_completion); 315 316 /* Point to output device context in dcbaa. */ 317 xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; 318 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 319 slot_id, 320 &xhci->dcbaa->dev_context_ptrs[slot_id], 321 (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); 322 323 return 1; 324 fail: 325 xhci_free_virt_device(xhci, slot_id); 326 return 0; 327 } 328 329 /* Setup an xHCI virtual device for a Set Address command */ 330 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) 331 { 332 struct xhci_virt_device *dev; 333 struct xhci_ep_ctx *ep0_ctx; 334 struct usb_device *top_dev; 335 struct xhci_slot_ctx *slot_ctx; 336 struct xhci_input_control_ctx *ctrl_ctx; 337 338 dev = xhci->devs[udev->slot_id]; 339 /* Slot ID 0 is reserved */ 340 if (udev->slot_id == 0 || !dev) { 341 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", 342 udev->slot_id); 343 return -EINVAL; 344 } 345 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 346 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); 347 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 348 349 /* 2) New slot context and endpoint 0 context are valid*/ 350 ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; 351 352 /* 3) Only the control endpoint is valid - one endpoint context */ 353 slot_ctx->dev_info |= LAST_CTX(1); 354 355 switch (udev->speed) { 356 case USB_SPEED_SUPER: 357 slot_ctx->dev_info |= (u32) udev->route; 358 slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; 359 break; 360 case USB_SPEED_HIGH: 361 slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; 362 break; 363 case USB_SPEED_FULL: 364 slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; 365 break; 366 case USB_SPEED_LOW: 367 slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; 368 break; 369 case USB_SPEED_VARIABLE: 370 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 371 return -EINVAL; 372 break; 373 default: 374 /* Speed was set earlier, this shouldn't happen. */ 375 BUG(); 376 } 377 /* Find the root hub port this device is under */ 378 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 379 top_dev = top_dev->parent) 380 /* Found device below root hub */; 381 slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 382 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); 383 384 /* Is this a LS/FS device under a HS hub? */ 385 /* 386 * FIXME: I don't think this is right, where does the TT info for the 387 * roothub or parent hub come from? 388 */ 389 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && 390 udev->tt) { 391 slot_ctx->tt_info = udev->tt->hub->slot_id; 392 slot_ctx->tt_info |= udev->ttport << 8; 393 } 394 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 395 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 396 397 /* Step 4 - ring already allocated */ 398 /* Step 5 */ 399 ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); 400 /* 401 * See section 4.3 bullet 6: 402 * The default Max Packet size for ep0 is "8 bytes for a USB2 403 * LS/FS/HS device or 512 bytes for a USB3 SS device" 404 * XXX: Not sure about wireless USB devices. 405 */ 406 if (udev->speed == USB_SPEED_SUPER) 407 ep0_ctx->ep_info2 |= MAX_PACKET(512); 408 else 409 ep0_ctx->ep_info2 |= MAX_PACKET(8); 410 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ 411 ep0_ctx->ep_info2 |= MAX_BURST(0); 412 ep0_ctx->ep_info2 |= ERROR_COUNT(3); 413 414 ep0_ctx->deq = 415 dev->ep_rings[0]->first_seg->dma; 416 ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; 417 418 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 419 420 return 0; 421 } 422 423 /* Return the polling or NAK interval. 424 * 425 * The polling interval is expressed in "microframes". If xHCI's Interval field 426 * is set to N, it will service the endpoint every 2^(Interval)*125us. 427 * 428 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval 429 * is set to 0. 430 */ 431 static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, 432 struct usb_host_endpoint *ep) 433 { 434 unsigned int interval = 0; 435 436 switch (udev->speed) { 437 case USB_SPEED_HIGH: 438 /* Max NAK rate */ 439 if (usb_endpoint_xfer_control(&ep->desc) || 440 usb_endpoint_xfer_bulk(&ep->desc)) 441 interval = ep->desc.bInterval; 442 /* Fall through - SS and HS isoc/int have same decoding */ 443 case USB_SPEED_SUPER: 444 if (usb_endpoint_xfer_int(&ep->desc) || 445 usb_endpoint_xfer_isoc(&ep->desc)) { 446 if (ep->desc.bInterval == 0) 447 interval = 0; 448 else 449 interval = ep->desc.bInterval - 1; 450 if (interval > 15) 451 interval = 15; 452 if (interval != ep->desc.bInterval + 1) 453 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", 454 ep->desc.bEndpointAddress, 1 << interval); 455 } 456 break; 457 /* Convert bInterval (in 1-255 frames) to microframes and round down to 458 * nearest power of 2. 459 */ 460 case USB_SPEED_FULL: 461 case USB_SPEED_LOW: 462 if (usb_endpoint_xfer_int(&ep->desc) || 463 usb_endpoint_xfer_isoc(&ep->desc)) { 464 interval = fls(8*ep->desc.bInterval) - 1; 465 if (interval > 10) 466 interval = 10; 467 if (interval < 3) 468 interval = 3; 469 if ((1 << interval) != 8*ep->desc.bInterval) 470 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", 471 ep->desc.bEndpointAddress, 1 << interval); 472 } 473 break; 474 default: 475 BUG(); 476 } 477 return EP_INTERVAL(interval); 478 } 479 480 static inline u32 xhci_get_endpoint_type(struct usb_device *udev, 481 struct usb_host_endpoint *ep) 482 { 483 int in; 484 u32 type; 485 486 in = usb_endpoint_dir_in(&ep->desc); 487 if (usb_endpoint_xfer_control(&ep->desc)) { 488 type = EP_TYPE(CTRL_EP); 489 } else if (usb_endpoint_xfer_bulk(&ep->desc)) { 490 if (in) 491 type = EP_TYPE(BULK_IN_EP); 492 else 493 type = EP_TYPE(BULK_OUT_EP); 494 } else if (usb_endpoint_xfer_isoc(&ep->desc)) { 495 if (in) 496 type = EP_TYPE(ISOC_IN_EP); 497 else 498 type = EP_TYPE(ISOC_OUT_EP); 499 } else if (usb_endpoint_xfer_int(&ep->desc)) { 500 if (in) 501 type = EP_TYPE(INT_IN_EP); 502 else 503 type = EP_TYPE(INT_OUT_EP); 504 } else { 505 BUG(); 506 } 507 return type; 508 } 509 510 int xhci_endpoint_init(struct xhci_hcd *xhci, 511 struct xhci_virt_device *virt_dev, 512 struct usb_device *udev, 513 struct usb_host_endpoint *ep, 514 gfp_t mem_flags) 515 { 516 unsigned int ep_index; 517 struct xhci_ep_ctx *ep_ctx; 518 struct xhci_ring *ep_ring; 519 unsigned int max_packet; 520 unsigned int max_burst; 521 522 ep_index = xhci_get_endpoint_index(&ep->desc); 523 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 524 525 /* Set up the endpoint ring */ 526 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); 527 if (!virt_dev->new_ep_rings[ep_index]) 528 return -ENOMEM; 529 ep_ring = virt_dev->new_ep_rings[ep_index]; 530 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; 531 532 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 533 534 /* FIXME dig Mult and streams info out of ep companion desc */ 535 536 /* Allow 3 retries for everything but isoc; 537 * error count = 0 means infinite retries. 538 */ 539 if (!usb_endpoint_xfer_isoc(&ep->desc)) 540 ep_ctx->ep_info2 = ERROR_COUNT(3); 541 else 542 ep_ctx->ep_info2 = ERROR_COUNT(1); 543 544 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); 545 546 /* Set the max packet size and max burst */ 547 switch (udev->speed) { 548 case USB_SPEED_SUPER: 549 max_packet = ep->desc.wMaxPacketSize; 550 ep_ctx->ep_info2 |= MAX_PACKET(max_packet); 551 /* dig out max burst from ep companion desc */ 552 if (!ep->ss_ep_comp) { 553 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); 554 max_packet = 0; 555 } else { 556 max_packet = ep->ss_ep_comp->desc.bMaxBurst; 557 } 558 ep_ctx->ep_info2 |= MAX_BURST(max_packet); 559 break; 560 case USB_SPEED_HIGH: 561 /* bits 11:12 specify the number of additional transaction 562 * opportunities per microframe (USB 2.0, section 9.6.6) 563 */ 564 if (usb_endpoint_xfer_isoc(&ep->desc) || 565 usb_endpoint_xfer_int(&ep->desc)) { 566 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; 567 ep_ctx->ep_info2 |= MAX_BURST(max_burst); 568 } 569 /* Fall through */ 570 case USB_SPEED_FULL: 571 case USB_SPEED_LOW: 572 max_packet = ep->desc.wMaxPacketSize & 0x3ff; 573 ep_ctx->ep_info2 |= MAX_PACKET(max_packet); 574 break; 575 default: 576 BUG(); 577 } 578 /* FIXME Debug endpoint context */ 579 return 0; 580 } 581 582 void xhci_endpoint_zero(struct xhci_hcd *xhci, 583 struct xhci_virt_device *virt_dev, 584 struct usb_host_endpoint *ep) 585 { 586 unsigned int ep_index; 587 struct xhci_ep_ctx *ep_ctx; 588 589 ep_index = xhci_get_endpoint_index(&ep->desc); 590 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 591 592 ep_ctx->ep_info = 0; 593 ep_ctx->ep_info2 = 0; 594 ep_ctx->deq = 0; 595 ep_ctx->tx_info = 0; 596 /* Don't free the endpoint ring until the set interface or configuration 597 * request succeeds. 598 */ 599 } 600 601 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ 602 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) 603 { 604 int i; 605 struct device *dev = xhci_to_hcd(xhci)->self.controller; 606 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 607 608 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); 609 610 if (!num_sp) 611 return 0; 612 613 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); 614 if (!xhci->scratchpad) 615 goto fail_sp; 616 617 xhci->scratchpad->sp_array = 618 pci_alloc_consistent(to_pci_dev(dev), 619 num_sp * sizeof(u64), 620 &xhci->scratchpad->sp_dma); 621 if (!xhci->scratchpad->sp_array) 622 goto fail_sp2; 623 624 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); 625 if (!xhci->scratchpad->sp_buffers) 626 goto fail_sp3; 627 628 xhci->scratchpad->sp_dma_buffers = 629 kzalloc(sizeof(dma_addr_t) * num_sp, flags); 630 631 if (!xhci->scratchpad->sp_dma_buffers) 632 goto fail_sp4; 633 634 xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; 635 for (i = 0; i < num_sp; i++) { 636 dma_addr_t dma; 637 void *buf = pci_alloc_consistent(to_pci_dev(dev), 638 xhci->page_size, &dma); 639 if (!buf) 640 goto fail_sp5; 641 642 xhci->scratchpad->sp_array[i] = dma; 643 xhci->scratchpad->sp_buffers[i] = buf; 644 xhci->scratchpad->sp_dma_buffers[i] = dma; 645 } 646 647 return 0; 648 649 fail_sp5: 650 for (i = i - 1; i >= 0; i--) { 651 pci_free_consistent(to_pci_dev(dev), xhci->page_size, 652 xhci->scratchpad->sp_buffers[i], 653 xhci->scratchpad->sp_dma_buffers[i]); 654 } 655 kfree(xhci->scratchpad->sp_dma_buffers); 656 657 fail_sp4: 658 kfree(xhci->scratchpad->sp_buffers); 659 660 fail_sp3: 661 pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), 662 xhci->scratchpad->sp_array, 663 xhci->scratchpad->sp_dma); 664 665 fail_sp2: 666 kfree(xhci->scratchpad); 667 xhci->scratchpad = NULL; 668 669 fail_sp: 670 return -ENOMEM; 671 } 672 673 static void scratchpad_free(struct xhci_hcd *xhci) 674 { 675 int num_sp; 676 int i; 677 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 678 679 if (!xhci->scratchpad) 680 return; 681 682 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 683 684 for (i = 0; i < num_sp; i++) { 685 pci_free_consistent(pdev, xhci->page_size, 686 xhci->scratchpad->sp_buffers[i], 687 xhci->scratchpad->sp_dma_buffers[i]); 688 } 689 kfree(xhci->scratchpad->sp_dma_buffers); 690 kfree(xhci->scratchpad->sp_buffers); 691 pci_free_consistent(pdev, num_sp * sizeof(u64), 692 xhci->scratchpad->sp_array, 693 xhci->scratchpad->sp_dma); 694 kfree(xhci->scratchpad); 695 xhci->scratchpad = NULL; 696 } 697 698 void xhci_mem_cleanup(struct xhci_hcd *xhci) 699 { 700 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 701 int size; 702 int i; 703 704 /* Free the Event Ring Segment Table and the actual Event Ring */ 705 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 706 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); 707 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); 708 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 709 if (xhci->erst.entries) 710 pci_free_consistent(pdev, size, 711 xhci->erst.entries, xhci->erst.erst_dma_addr); 712 xhci->erst.entries = NULL; 713 xhci_dbg(xhci, "Freed ERST\n"); 714 if (xhci->event_ring) 715 xhci_ring_free(xhci, xhci->event_ring); 716 xhci->event_ring = NULL; 717 xhci_dbg(xhci, "Freed event ring\n"); 718 719 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); 720 if (xhci->cmd_ring) 721 xhci_ring_free(xhci, xhci->cmd_ring); 722 xhci->cmd_ring = NULL; 723 xhci_dbg(xhci, "Freed command ring\n"); 724 725 for (i = 1; i < MAX_HC_SLOTS; ++i) 726 xhci_free_virt_device(xhci, i); 727 728 if (xhci->segment_pool) 729 dma_pool_destroy(xhci->segment_pool); 730 xhci->segment_pool = NULL; 731 xhci_dbg(xhci, "Freed segment pool\n"); 732 733 if (xhci->device_pool) 734 dma_pool_destroy(xhci->device_pool); 735 xhci->device_pool = NULL; 736 xhci_dbg(xhci, "Freed device context pool\n"); 737 738 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); 739 if (xhci->dcbaa) 740 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 741 xhci->dcbaa, xhci->dcbaa->dma); 742 xhci->dcbaa = NULL; 743 744 xhci->page_size = 0; 745 xhci->page_shift = 0; 746 scratchpad_free(xhci); 747 } 748 749 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 750 { 751 dma_addr_t dma; 752 struct device *dev = xhci_to_hcd(xhci)->self.controller; 753 unsigned int val, val2; 754 u64 val_64; 755 struct xhci_segment *seg; 756 u32 page_size; 757 int i; 758 759 page_size = xhci_readl(xhci, &xhci->op_regs->page_size); 760 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); 761 for (i = 0; i < 16; i++) { 762 if ((0x1 & page_size) != 0) 763 break; 764 page_size = page_size >> 1; 765 } 766 if (i < 16) 767 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); 768 else 769 xhci_warn(xhci, "WARN: no supported page size\n"); 770 /* Use 4K pages, since that's common and the minimum the HC supports */ 771 xhci->page_shift = 12; 772 xhci->page_size = 1 << xhci->page_shift; 773 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); 774 775 /* 776 * Program the Number of Device Slots Enabled field in the CONFIG 777 * register with the max value of slots the HC can handle. 778 */ 779 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); 780 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", 781 (unsigned int) val); 782 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); 783 val |= (val2 & ~HCS_SLOTS_MASK); 784 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", 785 (unsigned int) val); 786 xhci_writel(xhci, val, &xhci->op_regs->config_reg); 787 788 /* 789 * Section 5.4.8 - doorbell array must be 790 * "physically contiguous and 64-byte (cache line) aligned". 791 */ 792 xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev), 793 sizeof(*xhci->dcbaa), &dma); 794 if (!xhci->dcbaa) 795 goto fail; 796 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); 797 xhci->dcbaa->dma = dma; 798 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 799 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 800 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); 801 802 /* 803 * Initialize the ring segment pool. The ring must be a contiguous 804 * structure comprised of TRBs. The TRBs must be 16 byte aligned, 805 * however, the command ring segment needs 64-byte aligned segments, 806 * so we pick the greater alignment need. 807 */ 808 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 809 SEGMENT_SIZE, 64, xhci->page_size); 810 811 /* See Table 46 and Note on Figure 55 */ 812 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 813 2112, 64, xhci->page_size); 814 if (!xhci->segment_pool || !xhci->device_pool) 815 goto fail; 816 817 /* Set up the command ring to have one segments for now. */ 818 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); 819 if (!xhci->cmd_ring) 820 goto fail; 821 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); 822 xhci_dbg(xhci, "First segment DMA is 0x%llx\n", 823 (unsigned long long)xhci->cmd_ring->first_seg->dma); 824 825 /* Set the address in the Command Ring Control register */ 826 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 827 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 828 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 829 xhci->cmd_ring->cycle_state; 830 xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); 831 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 832 xhci_dbg_cmd_ptrs(xhci); 833 834 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 835 val &= DBOFF_MASK; 836 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 837 " from cap regs base addr\n", val); 838 xhci->dba = (void *) xhci->cap_regs + val; 839 xhci_dbg_regs(xhci); 840 xhci_print_run_regs(xhci); 841 /* Set ir_set to interrupt register set 0 */ 842 xhci->ir_set = (void *) xhci->run_regs->ir_set; 843 844 /* 845 * Event ring setup: Allocate a normal ring, but also setup 846 * the event ring segment table (ERST). Section 4.9.3. 847 */ 848 xhci_dbg(xhci, "// Allocating event ring\n"); 849 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); 850 if (!xhci->event_ring) 851 goto fail; 852 853 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), 854 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); 855 if (!xhci->erst.entries) 856 goto fail; 857 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", 858 (unsigned long long)dma); 859 860 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); 861 xhci->erst.num_entries = ERST_NUM_SEGS; 862 xhci->erst.erst_dma_addr = dma; 863 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", 864 xhci->erst.num_entries, 865 xhci->erst.entries, 866 (unsigned long long)xhci->erst.erst_dma_addr); 867 868 /* set ring base address and size for each segment table entry */ 869 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 870 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 871 entry->seg_addr = seg->dma; 872 entry->seg_size = TRBS_PER_SEGMENT; 873 entry->rsvd = 0; 874 seg = seg->next; 875 } 876 877 /* set ERST count with the number of entries in the segment table */ 878 val = xhci_readl(xhci, &xhci->ir_set->erst_size); 879 val &= ERST_SIZE_MASK; 880 val |= ERST_NUM_SEGS; 881 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", 882 val); 883 xhci_writel(xhci, val, &xhci->ir_set->erst_size); 884 885 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); 886 /* set the segment table base address */ 887 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 888 (unsigned long long)xhci->erst.erst_dma_addr); 889 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); 890 val_64 &= ERST_PTR_MASK; 891 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); 892 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); 893 894 /* Set the event ring dequeue address */ 895 xhci_set_hc_event_deq(xhci); 896 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 897 xhci_print_ir_set(xhci, xhci->ir_set, 0); 898 899 /* 900 * XXX: Might need to set the Interrupter Moderation Register to 901 * something other than the default (~1ms minimum between interrupts). 902 * See section 5.5.1.2. 903 */ 904 init_completion(&xhci->addr_dev); 905 for (i = 0; i < MAX_HC_SLOTS; ++i) 906 xhci->devs[i] = 0; 907 908 if (scratchpad_alloc(xhci, flags)) 909 goto fail; 910 911 return 0; 912 913 fail: 914 xhci_warn(xhci, "Couldn't initialize memory\n"); 915 xhci_mem_cleanup(xhci); 916 return -ENOMEM; 917 } 918