// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>

#include "sb_regs.h"
#include "tb.h"

#define USB4_DATA_RETRIES		3

enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
#define USB4_BA_INDEX_MASK		GENMASK(15, 0)

enum usb4_ba_index {
	USB4_BA_MAX_USB3 = 0x1,
	USB4_BA_MIN_DP_AUX = 0x2,
	USB4_BA_MIN_DP_MAIN = 0x3,
	USB4_BA_MAX_PCIE = 0x4,
	USB4_BA_MAX_HI = 0x5,
};

#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT		16

static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
				    u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}

static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}

static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}

static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}

static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/* Check for any connected downstream ports for USB4 wake */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");

		if (val & PORT_CS_18_WOU4S)
			wakeup = true;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}

static bool link_is_usb4(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT,
			 port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !(val & PORT_CS_18_TCM);
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g. the parent switch also supports them). If USB tunneling
 * is not available for some reason (such as a Thunderbolt 3 switch
 * upstream) then the internal xHCI controller is enabled instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");
"yes" : "no"); 270 271 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 272 if (ret) 273 return ret; 274 275 if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 && 276 tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { 277 val |= ROUTER_CS_5_UTO; 278 xhci = false; 279 } 280 281 /* 282 * Only enable PCIe tunneling if the parent router supports it 283 * and it is not disabled. 284 */ 285 if (tb_acpi_may_tunnel_pcie() && 286 tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { 287 val |= ROUTER_CS_5_PTO; 288 /* 289 * xHCI can be enabled if PCIe tunneling is supported 290 * and the parent does not have any USB3 dowstream 291 * adapters (so we cannot do USB 3.x tunneling). 292 */ 293 if (xhci) 294 val |= ROUTER_CS_5_HCO; 295 } 296 297 /* TBT3 supported by the CM */ 298 val |= ROUTER_CS_5_C3S; 299 /* Tunneling configuration is ready now */ 300 val |= ROUTER_CS_5_CV; 301 302 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 303 if (ret) 304 return ret; 305 306 return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, 307 ROUTER_CS_6_CR, 50); 308 } 309 310 /** 311 * usb4_switch_read_uid() - Read UID from USB4 router 312 * @sw: USB4 router 313 * @uid: UID is stored here 314 * 315 * Reads 64-bit UID from USB4 router config space. 316 */ 317 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) 318 { 319 return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); 320 } 321 322 static int usb4_switch_drom_read_block(void *data, 323 unsigned int dwaddress, void *buf, 324 size_t dwords) 325 { 326 struct tb_switch *sw = data; 327 u8 status = 0; 328 u32 metadata; 329 int ret; 330 331 metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; 332 metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & 333 USB4_DROM_ADDRESS_MASK; 334 335 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata, 336 &status, NULL, 0, buf, dwords); 337 if (ret) 338 return ret; 339 340 return status ? -EIO : 0; 341 } 342 343 /** 344 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM 345 * @sw: USB4 router 346 * @address: Byte address inside DROM to start reading 347 * @buf: Buffer where the DROM content is stored 348 * @size: Number of bytes to read from DROM 349 * 350 * Uses USB4 router operations to read router DROM. For devices this 351 * should always work but for hosts it may return %-EOPNOTSUPP in which 352 * case the host router does not have DROM. 353 */ 354 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, 355 size_t size) 356 { 357 return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, 358 usb4_switch_drom_read_block, sw); 359 } 360 361 /** 362 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding 363 * @sw: USB4 router 364 * 365 * Checks whether conditions are met so that lane bonding can be 366 * established with the upstream router. Call only for device routers. 367 */ 368 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) 369 { 370 struct tb_port *up; 371 int ret; 372 u32 val; 373 374 up = tb_upstream_port(sw); 375 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); 376 if (ret) 377 return false; 378 379 return !!(val & PORT_CS_18_BE); 380 } 381 382 /** 383 * usb4_switch_set_wake() - Enabled/disable wake 384 * @sw: USB4 router 385 * @flags: Wakeup flags (%0 to disable) 386 * 387 * Enables/disables router to wake up from sleep. 
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;

			if ((flags & TB_WAKE_ON_CONNECT) && !configured)
				val |= PORT_CS_19_WOC;
			if ((flags & TB_WAKE_ON_DISCONNECT) && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
					ROUTER_CS_6_SLPR, 500);
}
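
/*
 * Illustrative sketch (not part of the driver): how a connection
 * manager might combine the two helpers above when suspending a device
 * router. The chosen wake flag combination is hypothetical.
 */
static int __maybe_unused usb4_example_suspend_router(struct tb_switch *sw)
{
	int ret;

	/* Arm the wake sources we want to be active while asleep */
	ret = usb4_switch_set_wake(sw, TB_WAKE_ON_USB4 | TB_WAKE_ON_CONNECT |
				   TB_WAKE_ON_DISCONNECT);
	if (ret)
		return ret;

	/* Set the sleep bit and wait for the sleep ready bit */
	return usb4_switch_set_sleep(sw);
}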

/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		   USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}

/**
 * usb4_switch_nvm_set_offset() - Set NVM write offset
 * @sw: USB4 router
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this
 * is done automatically by usb4_switch_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
			     &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}

/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. The router gets power
 * cycled and if the authentication is successful the new NVM starts
 * running. In case of failure returns negative errno.
 *
 * The caller should call usb4_switch_nvm_authenticate_status() to read
 * the status of the authentication after power cycle. It should be the
 * first router operation to avoid the status being lost.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
	switch (ret) {
	/*
	 * The router is power cycled once NVM_AUTH is started so it is
	 * expected to get any of the following errors back.
	 */
	case -EACCES:
	case -ENOTCONN:
	case -ETIMEDOUT:
		return 0;

	default:
		return ret;
	}
}

/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		*status = 0;
	}

	return 0;
}
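
/*
 * Illustrative sketch (not part of the driver): the router NVM upgrade
 * flow built from the helpers above. @image/@image_size are
 * hypothetical. After the router has power cycled and is enumerated
 * again, usb4_switch_nvm_authenticate_status() must be the first router
 * operation so the result is not lost.
 */
static int __maybe_unused usb4_example_nvm_upgrade(struct tb_switch *sw,
						   const void *image,
						   size_t image_size)
{
	int ret;

	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
	if (ret)
		return ret;

	/* On success the router power cycles into the new image */
	ret = usb4_switch_nvm_authenticate(sw);
	if (ret)
		return ret;

	/*
	 * ...after re-enumeration:
	 *
	 *	u32 status;
	 *
	 *	ret = usb4_switch_nvm_authenticate_status(sw, &status);
	 *	if (!ret && status)
	 *		...authentication failed, @status has the reason...
	 */
	return 0;
}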

/**
 * usb4_switch_credits_init() - Read buffer allocation parameters
 * @sw: USB4 router
 *
 * Reads @sw buffer allocation parameters and initializes @sw buffer
 * allocation fields accordingly. Specifically @sw->credit_allocation
 * is set to %true if these parameters can be used in tunneling.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_switch_credits_init(struct tb_switch *sw)
{
	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
	int ret, length, i, nports;
	const struct tb_port *port;
	u32 data[NVM_DATA_DWORDS];
	u32 metadata = 0;
	u8 status = 0;

	memset(data, 0, sizeof(data));
	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
				  &status, NULL, 0, data, ARRAY_SIZE(data));
	if (ret)
		return ret;
	if (status)
		return -EIO;

	length = metadata & USB4_BA_LENGTH_MASK;
	if (WARN_ON(length > ARRAY_SIZE(data)))
		return -EMSGSIZE;

	max_usb3 = -1;
	min_dp_aux = -1;
	min_dp_main = -1;
	max_pcie = -1;
	max_dma = -1;

	tb_sw_dbg(sw, "credit allocation parameters:\n");

	for (i = 0; i < length; i++) {
		u16 index, value;

		index = data[i] & USB4_BA_INDEX_MASK;
		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;

		switch (index) {
		case USB4_BA_MAX_USB3:
			tb_sw_dbg(sw, " USB3: %u\n", value);
			max_usb3 = value;
			break;
		case USB4_BA_MIN_DP_AUX:
			tb_sw_dbg(sw, " DP AUX: %u\n", value);
			min_dp_aux = value;
			break;
		case USB4_BA_MIN_DP_MAIN:
			tb_sw_dbg(sw, " DP main: %u\n", value);
			min_dp_main = value;
			break;
		case USB4_BA_MAX_PCIE:
			tb_sw_dbg(sw, " PCIe: %u\n", value);
			max_pcie = value;
			break;
		case USB4_BA_MAX_HI:
			tb_sw_dbg(sw, " DMA: %u\n", value);
			max_dma = value;
			break;
		default:
			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
				  index);
			break;
		}
	}

	/*
	 * Validate the buffer allocation preferences. If we find
	 * issues, log a warning and fall back to using the hard-coded
	 * values.
	 */

	/* Host router must report baMaxHI */
	if (!tb_route(sw) && max_dma < 0) {
		tb_sw_warn(sw, "host router is missing baMaxHI\n");
		goto err_invalid;
	}

	nports = 0;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port))
			nports++;
	}

	/* Must have DP buffer allocation (multiple USB4 ports) */
	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
		goto err_invalid;
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpout(port) && min_dp_main < 0) {
			tb_sw_warn(sw, "missing baMinDPmain");
			goto err_invalid;
		}
		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
		    min_dp_aux < 0) {
			tb_sw_warn(sw, "missing baMinDPaux");
			goto err_invalid;
		}
		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
		    max_usb3 < 0) {
			tb_sw_warn(sw, "missing baMaxUSB3");
			goto err_invalid;
		}
		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
		    max_pcie < 0) {
			tb_sw_warn(sw, "missing baMaxPCIe");
			goto err_invalid;
		}
	}

	/*
	 * Buffer allocation passed the validation so we can use it in
	 * path creation.
	 */
	sw->credit_allocation = true;
	if (max_usb3 > 0)
		sw->max_usb3_credits = max_usb3;
	if (min_dp_aux > 0)
		sw->min_dp_aux_credits = min_dp_aux;
	if (min_dp_main > 0)
		sw->min_dp_main_credits = min_dp_main;
	if (max_pcie > 0)
		sw->max_pcie_credits = max_pcie;
	if (max_dma > 0)
		sw->max_dma_credits = max_dma;

	return 0;

err_invalid:
	return -EINVAL;
}

/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
			     &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	else if (ret)
		return false;

	return !status;
}

/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EBUSY : 0;
}

/**
 * usb4_switch_dealloc_dp_resource() - Release allocated DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Releases the previously allocated DP IN resource.
 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EIO : 0;
}
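
/*
 * Illustrative sketch (not part of the driver): claiming a DP IN
 * resource before tunnel setup and releasing it on teardown, using the
 * three helpers above.
 */
static int __maybe_unused usb4_example_claim_dp_in(struct tb_switch *sw,
						   struct tb_port *in)
{
	if (!usb4_switch_query_dp_resource(sw, in))
		return -EBUSY;

	/*
	 * Still returns -EBUSY if somebody claimed the resource between
	 * the query and the allocation. usb4_switch_dealloc_dp_resource()
	 * undoes this when the tunnel is torn down.
	 */
	return usb4_switch_alloc_dp_resource(sw, in);
}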

static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}

/**
 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and PCIe
 * downstream adapters where the PCIe topology is extended. This
 * function returns the corresponding downstream PCIe adapter or %NULL
 * if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int pcie_idx = 0;

	/* Find PCIe down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_pcie_down(p))
			continue;

		if (pcie_idx == usb4_idx)
			return p;

		pcie_idx++;
	}

	return NULL;
}

/**
 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 * downstream adapters where the USB 3.x topology is extended. This
 * function returns the corresponding downstream USB 3.x adapter or
 * %NULL if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int usb_idx = 0;

	/* Find USB3 down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_usb3_down(p))
			continue;

		if (usb_idx == usb4_idx)
			return p;

		usb_idx++;
	}

	return NULL;
}
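
/*
 * Example of the index math above (hypothetical router layout): if the
 * lane 0 adapters of the downstream USB4 ports are adapters 1 and 3,
 * the port at adapter 1 gets usb4_idx 0 and the port at adapter 3 gets
 * usb4_idx 1. The mapping functions then return the first PCIe/USB3
 * downstream adapter for usb4_idx 0, the second for usb4_idx 1, and so
 * on.
 */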

/**
 * usb4_switch_add_ports() - Add USB4 ports for this router
 * @sw: USB4 router
 *
 * For USB4 router finds all USB4 ports and registers devices for each.
 * Can be called for any router.
 *
 * Return %0 in case of success and negative errno in case of failure.
 */
int usb4_switch_add_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		struct usb4_port *usb4;

		if (!tb_port_is_null(port))
			continue;
		if (!port->cap_usb4)
			continue;

		usb4 = usb4_port_device_add(port);
		if (IS_ERR(usb4)) {
			usb4_switch_remove_ports(sw);
			return PTR_ERR(usb4);
		}

		port->usb4 = usb4;
	}

	return 0;
}

/**
 * usb4_switch_remove_ports() - Removes USB4 ports from this router
 * @sw: USB4 router
 *
 * Unregisters previously registered USB4 ports.
 */
void usb4_switch_remove_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->usb4) {
			usb4_port_device_remove(port->usb4);
			port->usb4 = NULL;
		}
	}
}

/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}

static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port
 *
 * Sets the USB4 link to be configured for power management purposes.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}

/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}

static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PID;
	else
		val &= ~PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 *
 * Marks the USB4 port as being connected to another host. Returns %0 in
 * success and negative errno in failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port)
{
	return usb4_set_xdomain_configured(port, true);
}

/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}

static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}

static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}

static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}

static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}

static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		switch (val) {
		case 0:
			return 0;

		case USB4_SB_OPCODE_ERR:
			return -EAGAIN;

		case USB4_SB_OPCODE_ONS:
			return -EOPNOTSUPP;

		default:
			if (val != opcode)
				return -EIO;
			break;
		}
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val = !offline;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_router_offline() - Put the USB4 port to offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react on hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 link is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}

/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again.
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}

/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send broadcast RT transaction which
 * makes the retimers on the link assign indices to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}

/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command.
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}
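
/*
 * Illustrative sketch (not part of the driver): preparing retimer
 * sideband access on a port whose USB4 link is down. Retimer registers
 * can then be accessed with usb4_port_retimer_read()/..._write() below.
 * The retimer @index is hypothetical.
 */
static int __maybe_unused usb4_example_retimer_access(struct tb_port *port,
						      u8 index)
{
	int ret;

	/* Stop reacting to hotplug while we use the sideband */
	ret = usb4_port_router_offline(port);
	if (ret)
		return ret;

	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		goto out_online;

	ret = usb4_port_retimer_set_inbound_sbtx(port, index);
	if (ret)
		goto out_online;

	/* ...retimer sideband register accesses go here... */

out_online:
	usb4_port_router_online(port);
	return ret;
}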

/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}

/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}

/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last one (connected directly to the
 * Type-C port) this function returns %1. If it is not returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}

struct retimer_info {
	struct tb_port *port;
	u8 index;
};

static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}

/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	switch (val) {
	case 0:
		*status = 0;
		return 0;

	case USB4_SB_OPCODE_ERR:
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;

	default:
		return -EIO;
	}
}

static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	if (dwords < NVM_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}

/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}
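
/*
 * Illustrative sketch (not part of the driver): retimer NVM upgrade
 * flow built from the helpers above. @image/@image_size are
 * hypothetical; a real caller would give the authentication time to
 * complete before asking for the status.
 */
static int __maybe_unused usb4_example_retimer_upgrade(struct tb_port *port,
						       u8 index,
						       const void *image,
						       size_t image_size)
{
	u32 status;
	int ret;

	ret = usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate(port, index);
	if (ret)
		return ret;

	/* The retimer may lose its index, so force re-enumeration */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate_status(port, index, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}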

/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
}

/**
 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
 * @port: USB3 adapter port
 *
 * Return actual established link rate of a USB3 adapter in Mb/s. If the
 * link is not up returns %0 and negative errno in case of failure.
 */
int usb4_usb3_port_actual_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	if (!(val & ADP_USB3_CS_4_ULV))
		return 0;

	lr = val & ADP_USB3_CS_4_ALR_MASK;
	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
}

static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}

static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
	unsigned long uframes;

	uframes = bw * 512UL << scale;
	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
}

static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
	unsigned long uframes;

	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
	uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
	return DIV_ROUND_UP(uframes, 512UL << scale);
}
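
/*
 * Worked example of the two conversions above with scale = 0: asking
 * for 900 Mb/s gives mbps_to_usb3_bw(900, 0) =
 * DIV_ROUND_UP(900000000 / 8000, 512) = DIV_ROUND_UP(112500, 512) =
 * 220 granularity units. Converting back, usb3_bw_to_mbps(220, 0) =
 * DIV_ROUND_CLOSEST(220 * 512 * 8000, 1000000) = 901 Mb/s, slightly
 * above the request because of the round-up.
 */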

static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}

static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret;

	/* Read the used scale, hardware default is 0 */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;
	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}

/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it to go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 1000)
		consumed_up = 1000;
	if (consumed_down < 1000)
		consumed_down = 1000;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
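
/*
 * Illustrative sketch (not part of the driver): growing the USB3
 * allocation for a tunnel and later shrinking it back to the consumed
 * level using the two helpers above. The 2000/1500 Mb/s figures are
 * hypothetical.
 */
static int __maybe_unused usb4_example_usb3_bandwidth(struct tb_port *port)
{
	int up = 2000, down = 1500;
	int ret;

	/* May come back higher if that much is already consumed */
	ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
	if (ret)
		return ret;

	/* ...tunnel traffic runs, consumption settles... */

	/* Drop the allocation back down to what is actually consumed */
	return usb4_usb3_port_release_bandwidth(port, &up, &down);
}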