/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE
/**
 * Flushes the dcache for the memory region passed
 *
 * @param addr	start address of the memory region to be flushed
 * @param len	length of the memory region to be flushed
 * @return none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Invalidates the dcache for the memory region passed
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length of the memory region to be invalidated
 * @return none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}


/**
 * frees the "segment" pointer passed
 *
 * @param seg	pointer to the "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}

/**
 * frees the "ring" pointer passed
 *
 * @param ring	pointer to the "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}

/**
 * Free the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 * @return	none
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
	if (!ctrl->scratchpad)
		return;

	ctrl->dcbaa->dev_context_ptrs[0] = 0;

	free((void *)(uintptr_t)le64_to_cpu(ctrl->scratchpad->sp_array[0]));
	free(ctrl->scratchpad->sp_array);
	free(ctrl->scratchpad);
	ctrl->scratchpad = NULL;
}

/**
 * frees the "xhci_container_ctx" pointer passed
 *
 * @param ctx	pointer to the "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	free(ctx->bytes);
	free(ctx);
}

/**
 * frees the virtual devices for the "xhci_ctrl" pointer passed
 *
 * @param ctrl	pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * refactored here to loop through all virt_dev
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;
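
		/*
		 * Note: an xHCI device slot has up to 31 endpoint contexts
		 * (EP0 bidirectional plus 15 IN/OUT pairs), so eps[] is
		 * walked below for all 31 possible endpoints and any
		 * transfer ring allocated for them is freed.
		 */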

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}

/**
 * frees all the memory allocated
 *
 * @param ctrl	pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_scratchpad_free(ctrl);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Malloc the aligned memory
 *
 * @param size	size of memory to be allocated
 * @return pointer to the allocated and aligned memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev		pointer to the previous segment
 * @param next		pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
				struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/**
 * Initialises the ring's enqueue, dequeue and enq_seg pointers
 *
 * @param ring	pointer to the RING to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}
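
/*
 * Note on ring layout: the rings created below are single-segment. After
 * xhci_ring_alloc(1, true), TRBs 0 .. TRBS_PER_SEGMENT-2 are usable and the
 * last TRB of the segment is a Link TRB pointing back to the first TRB, with
 * the Toggle Cycle bit set so the cycle state flips on every wrap-around.
 */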

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param	none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of the xHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}

/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 * @return	-ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	struct xhci_hcor *hcor = ctrl->hcor;
	struct xhci_scratchpad *scratchpad;
	int num_sp;
	uint32_t page_size;
	void *buf;
	int i;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	if (!num_sp)
		return 0;

	scratchpad = malloc(sizeof(*scratchpad));
	if (!scratchpad)
		goto fail_sp;
	ctrl->scratchpad = scratchpad;

	scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
	if (!scratchpad->sp_array)
		goto fail_sp2;
	ctrl->dcbaa->dev_context_ptrs[0] =
		cpu_to_le64((uintptr_t)scratchpad->sp_array);

	/* PAGESIZE bit n set means the controller uses pages of 2^(n+12) bytes */
	page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	BUG_ON(i == 16);

	page_size = 1 << (i + 12);
	buf = memalign(page_size, num_sp * page_size);
	if (!buf)
		goto fail_sp3;
	memset(buf, '\0', num_sp * page_size);
	xhci_flush_cache((uintptr_t)buf, num_sp * page_size);

	for (i = 0; i < num_sp; i++) {
		uintptr_t ptr = (uintptr_t)buf + i * page_size;
		scratchpad->sp_array[i] = cpu_to_le64(ptr);
	}

	/* The controller reads the array from memory, so flush it out */
	xhci_flush_cache((uintptr_t)scratchpad->sp_array,
			 sizeof(u64) * num_sp);

	return 0;

fail_sp3:
	free(scratchpad->sp_array);

fail_sp2:
	free(scratchpad);
	ctrl->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
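
/*
 * Scratchpad note: DCBAA entry 0 is not a device slot; it holds the address
 * of the scratchpad buffer array set up above, and each array element holds
 * the address of one page-sized buffer reserved for the controller's private
 * use. For example, a controller reporting PAGESIZE bit 0 (4KiB pages) and
 * two scratchpad buffers gets a 2-element array pointing at two 4KiB pages.
 */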

/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of xHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}

/**
 * Allocates the virtual device for a slot
 *
 * @param ctrl		Host controller data structure
 * @param slot_id	slot ID assigned by the controller
 * @return 0 on success else negative error code on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
	u64 byte_64 = 0;
	struct xhci_virt_device *virt_dev;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			 sizeof(__le64));
	return 0;
}
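
/*
 * Per-slot data layout (for reference): each active slot has an output
 * device context that the controller reads and writes (its address lives in
 * the DCBAA entry for that slot), an input context used for Address Device /
 * Configure Endpoint commands, and one transfer ring per enabled endpoint.
 * Only the EP0 ring is allocated here; further endpoint rings are created
 * when endpoints are configured.
 */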

/**
 * Allocates the necessary data structures
 * for the xHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
					struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* write the address of db register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* write the address of runtime register */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
			val < ERST_NUM_SEGS;
			val++) {
		trb_64 = 0;
		trb_64 = (uintptr_t)seg->trbs;
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
				(u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* set up the scratchpad buffer array and scratchpad buffers */
	xhci_scratchpad_alloc(ctrl);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}
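
/*
 * Context layout used by the accessors below: a device (output) context is
 * a slot context followed by 31 endpoint contexts; an input context has an
 * input control context in front of those. Each entry is CTX_SIZE() bytes,
 * i.e. 32 or 64 bytes depending on the Context Size bit in HCCPARAMS.
 */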

/**
 * Gives the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Gives the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the endpoint context based on the ep_index
 *
 * @param ctrl		Host controller data structure
 * @param ctx		context container
 * @param ep_index	index of the endpoint
 * @return pointer to the endpoint context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @param ep_index	index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
					struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl		Host controller data structure
 * @param udev		pointer to the Device Data Structure
 * @param hop_portnr	root hub port number the device is connected through
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
				     struct usb_device *udev, int hop_portnr)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	int slot_id = udev->slot_id;
	int speed = udev->speed;
	int route = 0;
#ifdef CONFIG_DM_USB
	struct usb_device *dev = udev;
	struct usb_hub_device *hub;
#endif

	virt_dev = ctrl->devs[slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
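
	/*
	 * Illustration of the route string computed below (USB 3.0 spec
	 * section 8.9): each nibble, starting with the least significant
	 * one, holds the downstream port number at one hub tier below the
	 * root port. For example, a device on port 3 of a hub that itself
	 * sits on port 2 of a hub plugged into a root port gets route
	 * string 0x32; a device attached directly to a root port gets
	 * route string 0.
	 */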

#ifdef CONFIG_DM_USB
	/*
	 * Calculate the route string for this device by walking up the
	 * hub chain towards the root hub.
	 */
	port_num = dev->portnr;
	dev = dev_get_parent_priv(dev->dev->parent);
	while (!usb_hub_is_root_hub(dev->dev)) {
		hub = dev_get_uclass_priv(dev->dev);
		/*
		 * Each hub in the topology is expected to have no more than
		 * 15 ports in order for the route string of a device to be
		 * unique. SuperSpeed hubs are restricted to only having 15
		 * ports, but FS/LS/HS hubs are not. The xHCI specification
		 * says that if the port number of the device is greater than
		 * 15, that portion of the route string shall be set to 15.
		 */
		if (port_num > 15)
			port_num = 15;
		route |= port_num << (hub->hub_depth * 4);
		port_num = dev->portnr;
		dev = dev_get_parent_priv(dev->dev->parent);
	}

	debug("route string %x\n", route);
#endif
	slot_ctx->dev_info |= route;

	switch (speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
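
	/*
	 * For low/full-speed devices operating below a high-speed hub, the
	 * nearest high-speed hub provides the Transaction Translator; the
	 * code below records that hub's slot ID and downstream port number
	 * in the slot context so the controller can address the TT.
	 */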

#ifdef CONFIG_DM_USB
	/* Set up TT fields to support FS/LS devices */
	if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
		struct udevice *parent = udev->dev;

		dev = udev;
		do {
			port_num = dev->portnr;
			dev = dev_get_parent_priv(parent);
			if (usb_hub_is_root_hub(dev->dev))
				break;
			parent = dev->dev->parent;
		} while (dev->speed != USB_SPEED_HIGH);

		if (!usb_hub_is_root_hub(dev->dev)) {
			hub = dev_get_uclass_priv(dev->dev);
			if (hub->tt.multi)
				slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
			slot_ctx->tt_info |= cpu_to_le32(TT_PORT(port_num));
			slot_ctx->tt_info |= cpu_to_le32(TT_SLOT(dev->slot_id));
		}
	}
#endif

	port_num = hop_portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
			cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
				ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", speed);

	switch (speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
		cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
		((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/*
	 * xHCI spec 6.2.3:
	 * software shall set 'Average TRB Length' to 8 for control endpoints.
	 */
	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}