/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm-generic/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE

/**
 * Flushes the data cache for the memory region passed
 *
 * @param addr	start address of the memory region to be flushed
 * @param len	length of the memory region to be flushed
 * @return none
 */
void xhci_flush_cache(uint32_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Invalidates the data cache for the memory region passed
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length of the memory region to be invalidated
 * @return none
 */
void xhci_inval_cache(uint32_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
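
/*
 * Worked example (illustrative, assuming a 64-byte cache line):
 * xhci_flush_cache(0x1234, 8) operates on the range [0x1200, 0x1240) --
 * the start address is rounded down to a line boundary and the end is
 * rounded up with ALIGN(), so a partially covered line is always
 * flushed or invalidated in full, as the dcache ops require.
 */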

/**
 * Frees the "segment" pointer passed
 *
 * @param seg	pointer to the "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}

/**
 * Frees the "ring" pointer passed
 *
 * @param ring	pointer to the "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}

/**
 * Frees the "xhci_container_ctx" pointer passed
 *
 * @param ctx	pointer to the "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	free(ctx->bytes);
	free(ctx);
}

/**
 * Frees the virtual devices for the "xhci_ctrl" pointer passed
 *
 * @param ctrl	pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * Loop through all the virtual devices;
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}

/**
 * Frees all the memory allocated for the host controller
 *
 * @param ctrl	pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Allocates cache-line-aligned, zeroed memory
 *
 * @param size	size of the memory to be allocated
 * @return the aligned pointer to the allocated (and flushed) memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uint32_t)ptr, size);

	return ptr;
}

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev		pointer to the previous segment
 * @param next		pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the TRBs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
				struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/**
 * Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
 *
 * @param ring	pointer to the RING to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the TRBs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}
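
/*
 * Usage sketch (illustrative only):
 *
 *	struct xhci_ring *ring = xhci_ring_alloc(1, true);
 *	...enqueue TRBs and ring the doorbell...
 *	xhci_ring_free(ring);
 *
 * With link_trbs = true the last TRB of each segment becomes a Link TRB
 * pointing back into the ring, which the command and transfer rings
 * below rely on; the event ring is allocated with link_trbs = false
 * because the xHC walks it through ERST entries instead.
 */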

/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}
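
/*
 * Size illustration (the actual values depend on the controller):
 * CTX_SIZE() is 32 bytes, or 64 bytes when the Context Size (CSZ) bit
 * of HCCPARAMS is set. On a 32-byte-context xHC a device context is
 * (MAX_EP_CTX_NUM + 1) * 32 = 1024 bytes, and an input context adds
 * one more entry for the input control context, i.e. 1056 bytes.
 */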

/**
 * Allocates a virtual device for the given USB device
 *
 * @param udev	pointer to the USB device structure
 * @return 0 on success else negative error code on failure
 */
int xhci_alloc_virt_device(struct usb_device *udev)
{
	u64 byte_64 = 0;
	unsigned int slot_id = udev->slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_ctrl *ctrl = udev->controller;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uint32_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
							sizeof(__le64));
	return 0;
}

/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
					struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* read the offset of the doorbell array register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* read the offset of the runtime registers */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* save the address of the ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
			val < ERST_NUM_SEGS;
			val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = (uintptr_t)seg->trbs;
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uint32_t)ctrl->erst.entries,
			ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
				(u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((u32)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Zero this register completely, or spurious Device
	 * Notification Events might cause trouble here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}
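
/*
 * Call-order sketch (illustrative; the caller names are assumptions,
 * not defined in this file): the generic xHCI layer is expected to
 * obtain the register blocks and reset the controller before calling
 * in here, roughly:
 *
 *	xhci_hcd_init(index, &hccr, &hcor);	(board glue)
 *	...reset the controller...
 *	xhci_mem_init(ctrl, hccr, hcor);
 *
 * Nothing here starts the controller; the caller still has to set the
 * Run/Stop bit in USBCMD afterwards.
 */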

/**
 * Gets the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Gets the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl		Host controller data structure
 * @param ctx		context container
 * @param ep_index	index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}
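
/*
 * Index illustration: ep_index 0 is EP0 (the bidirectional control
 * endpoint). In a device context the slot context occupies entry 0,
 * so EP0 lives at entry 1 (offset 1 * CTX_SIZE); in an input context
 * the input control context shifts everything by one more entry,
 * putting EP0 at entry 2.
 */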

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @param ep_index	index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
					struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
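
/*
 * Typical use (illustrative): before issuing a Configure Endpoint
 * command, seed the input context from the current output context and
 * then modify only the fields of interest:
 *
 *	xhci_slot_copy(ctrl, virt_dev->in_ctx, virt_dev->out_ctx);
 *	xhci_endpoint_copy(ctrl, virt_dev->in_ctx, virt_dev->out_ctx,
 *			   ep_index);
 *	...adjust the copied endpoint context, set add/drop flags...
 */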

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param udev	pointer to the Device Data Structure
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct usb_device *udev)
{
	struct usb_device *hop = udev;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	struct xhci_ctrl *ctrl = udev->controller;

	virt_dev = ctrl->devs[udev->slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | 0);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

	/* Extract the root hub port number */
	if (hop->parent)
		while (hop->parent->parent)
			hop = hop->parent;
	port_num = hop->portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
			cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
				ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", udev->speed);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
		cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
		((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uint32_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uint32_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}
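
/*
 * Encoding illustration (assuming a high-speed device on root port 2):
 * the function above leaves the input slot context with
 *
 *	dev_info  = LAST_CTX(1) | SLOT_SPEED_HS
 *	dev_info2 = 2 << ROOT_HUB_PORT_SHIFT
 *
 * and EP0 configured as a control endpoint with a 64-byte max packet
 * and CErr = 3, following the Device Slot Initialization steps of the
 * xHCI spec (section 4.3.3).
 */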