// SPDX-License-Identifier: GPL-2.0+
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 */

#include <common.h>
#include <dm.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE

/**
 * Flushes the memory region from the data cache, rounded out to
 * cacheline boundaries
 *
 * @param addr	start address of the memory region to be flushed
 * @param len	length of the memory region to be flushed
 * @return none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
			   ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Invalidates the memory region in the data cache, rounded out to
 * cacheline boundaries
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length of the memory region to be invalidated
 * @return none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Frees the segment passed
 *
 * @param seg	pointer to the segment to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}

/**
 * Frees the ring passed
 *
 * @param ring	pointer to the ring to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}

/**
 * Free the scratchpad buffer array and scratchpad buffers
 *
 * @param ctrl	host controller data structure
 * @return none
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
	if (!ctrl->scratchpad)
		return;

	ctrl->dcbaa->dev_context_ptrs[0] = 0;

	free((void *)(uintptr_t)ctrl->scratchpad->sp_array[0]);
	free(ctrl->scratchpad->sp_array);
	free(ctrl->scratchpad);
	ctrl->scratchpad = NULL;
}

/**
 * Frees the "xhci_container_ctx" passed
 *
 * @param ctx	pointer to the "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	free(ctx->bytes);
	free(ctx);
}

/**
 * Frees the virtual devices of the "xhci_ctrl" passed
 *
 * @param ctrl	pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * Loop through all the virtual devices;
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

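		/*
		 * A slot owns up to 31 endpoint contexts (EP0 plus 30
		 * others); free whichever transfer rings were allocated.
		 */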
		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}

/**
 * Frees all the memory allocated by the host controller
 *
 * @param ctrl	pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_scratchpad_free(ctrl);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Allocates cacheline-aligned, zeroed memory
 *
 * @param size	size of the memory to be allocated
 * @return pointer to the aligned, zeroed memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev		pointer to the previous segment
 * @param next		pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the TRBs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/**
 * Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
 *
 * @param ring	pointer to the ring to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the TRBs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}

/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @param ctrl	host controller data structure
 * @return -ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	struct xhci_hcor *hcor = ctrl->hcor;
	struct xhci_scratchpad *scratchpad;
	int num_sp;
	uint32_t page_size;
	void *buf;
	int i;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	if (!num_sp)
		return 0;

	scratchpad = malloc(sizeof(*scratchpad));
	if (!scratchpad)
		goto fail_sp;
	ctrl->scratchpad = scratchpad;

	scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
	if (!scratchpad->sp_array)
		goto fail_sp2;
	ctrl->dcbaa->dev_context_ptrs[0] =
		cpu_to_le64((uintptr_t)scratchpad->sp_array);

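	/*
	 * The PAGESIZE register reports supported page sizes as a bit mask:
	 * bit n set means a page size of 2^(n + 12) bytes is supported.
	 * Pick the smallest supported page size for the scratchpad buffers.
	 */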
	page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	BUG_ON(i == 16);

	page_size = 1 << (i + 12);
	buf = memalign(page_size, num_sp * page_size);
	if (!buf)
		goto fail_sp3;
	memset(buf, '\0', num_sp * page_size);
	xhci_flush_cache((uintptr_t)buf, num_sp * page_size);

	for (i = 0; i < num_sp; i++) {
		uintptr_t ptr = (uintptr_t)buf + i * page_size;
		scratchpad->sp_array[i] = cpu_to_le64(ptr);
	}

	return 0;

fail_sp3:
	free(scratchpad->sp_array);

fail_sp2:
	free(scratchpad);
	ctrl->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}

/**
 * Allocates the virtual device and its contexts for the given slot
 *
 * @param ctrl		Host controller data structure
 * @param slot_id	slot ID assigned by the controller
 * @return 0 on success, -EEXIST or -ENOMEM on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
	u64 byte_64 = 0;
	struct xhci_virt_device *virt_dev;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

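	/*
	 * out_ctx->bytes came from xhci_malloc(), so it is already zeroed
	 * and flushed; the controller sees a clean output context as soon
	 * as its address is published in the DCBAA below.
	 */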
	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			 sizeof(__le64));
	return 0;
}

/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else -ENOMEM on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
		  struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		  (trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		  ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* write the address of db register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* write the address of runtime register */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
	     val < ERST_NUM_SEGS;
	     val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = (uintptr_t)seg->trbs;
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
		    (u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

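	/*
	 * Scratchpad buffers are memory for the controller's own internal
	 * use.  When HCSPARAMS2 reports a non-zero number of scratchpad
	 * buffers, entry 0 of the DCBAA must point at the scratchpad
	 * buffer array.
	 */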
	/* set up the scratchpad buffer array and scratchpad buffers */
	xhci_scratchpad_alloc(ctrl);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}

/**
 * Give the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Give the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl		Host controller data structure
 * @param ctx		context container
 * @param ep_index	index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @param ep_index	index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

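/*
 * These copy helpers support the usual Configure Endpoint flow: copy the
 * current output context into the input context, modify the endpoint (or
 * slot) fields of interest, set the matching add/drop flags in the input
 * control context, and only then issue the command.
 */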
/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl		Host controller data structure
 * @param udev		pointer to the Device Data Structure
 * @param hop_portnr	root hub port number the device is attached to
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
				     struct usb_device *udev, int hop_portnr)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	int slot_id = udev->slot_id;
	int speed = udev->speed;
	int route = 0;
#ifdef CONFIG_DM_USB
	struct usb_device *dev = udev;
	struct usb_hub_device *hub;
#endif

	virt_dev = ctrl->devs[slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

#ifdef CONFIG_DM_USB
	/* Calculate the route string for this device */
	port_num = dev->portnr;
	while (!usb_hub_is_root_hub(dev->dev)) {
		hub = dev_get_uclass_priv(dev->dev);
		/*
		 * Each hub in the topology is expected to have no more than
		 * 15 ports in order for the route string of a device to be
		 * unique. SuperSpeed hubs are restricted to only having 15
		 * ports, but FS/LS/HS hubs are not. The xHCI specification
		 * says that if the port number of the device is greater
		 * than 15, that portion of the route string shall be set
		 * to 15.
		 */
		if (port_num > 15)
			port_num = 15;
		route |= port_num << (hub->hub_depth * 4);
		dev = dev_get_parent_priv(dev->dev);
		port_num = dev->portnr;
		dev = dev_get_parent_priv(dev->dev->parent);
	}

	debug("route string %x\n", route);
#endif
	slot_ctx->dev_info |= route;

	switch (speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

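	/*
	 * Full- and low-speed devices behind a high-speed hub use that
	 * hub's Transaction Translator: the controller needs the hub's
	 * slot ID and downstream port number to schedule split
	 * transactions on their behalf.
	 */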
#ifdef CONFIG_DM_USB
	/* Set up TT fields to support FS/LS devices */
	if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
		struct udevice *parent = udev->dev;

		dev = udev;
		do {
			port_num = dev->portnr;
			dev = dev_get_parent_priv(parent);
			if (usb_hub_is_root_hub(dev->dev))
				break;
			parent = dev->dev->parent;
		} while (dev->speed != USB_SPEED_HIGH);

		if (!usb_hub_is_root_hub(dev->dev)) {
			hub = dev_get_uclass_priv(dev->dev);
			if (hub->tt.multi)
				slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
			slot_ctx->tt_info |= cpu_to_le32(TT_PORT(port_num));
			slot_ctx->tt_info |= cpu_to_le32(TT_SLOT(dev->slot_id));
		}
	}
#endif

	port_num = hop_portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
		cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
			ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", speed);

	switch (speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
		cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
		((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/*
	 * xHCI spec 6.2.3:
	 * software shall set 'Average TRB Length' to 8 for control endpoints.
	 */
	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}