// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/units.h>

#include "sb_regs.h"
#include "tb.h"

#define USB4_DATA_RETRIES		3

enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
#define USB4_BA_INDEX_MASK		GENMASK(15, 0)

enum usb4_ba_index {
	USB4_BA_MAX_USB3 = 0x1,
	USB4_BA_MIN_DP_AUX = 0x2,
	USB4_BA_MIN_DP_MAIN = 0x3,
	USB4_BA_MAX_PCIE = 0x4,
	USB4_BA_MAX_HI = 0x5,
};

#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT		16

static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}

static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}

static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}

static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}
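/*
 * Example (illustrative sketch, not part of the driver): router
 * operations throughout this file follow the same pattern. Metadata
 * goes to ROUTER_CS_25, the opcode to ROUTER_CS_26, and the operation
 * specific status comes back in @status:
 *
 *	u32 metadata;
 *	u8 status;
 *	int ret;
 *
 *	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE,
 *			     &metadata, &status);
 *	if (ret)
 *		return ret;	// transport level failure
 *	if (status)
 *		return -EIO;	// operation level failure
 */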
static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check all downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}

static bool link_is_usb4(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT,
			 port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !(val & PORT_CS_18_TCM);
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g. the parent switch also supports them). If USB tunneling
 * is not available for some reason (e.g. there is a Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
				      ROUTER_CS_6_CR, 50);
}
"yes" : "no"); 262 263 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 264 if (ret) 265 return ret; 266 267 if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 && 268 tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { 269 val |= ROUTER_CS_5_UTO; 270 xhci = false; 271 } 272 273 /* 274 * Only enable PCIe tunneling if the parent router supports it 275 * and it is not disabled. 276 */ 277 if (tb_acpi_may_tunnel_pcie() && 278 tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { 279 val |= ROUTER_CS_5_PTO; 280 /* 281 * xHCI can be enabled if PCIe tunneling is supported 282 * and the parent does not have any USB3 dowstream 283 * adapters (so we cannot do USB 3.x tunneling). 284 */ 285 if (xhci) 286 val |= ROUTER_CS_5_HCO; 287 } 288 289 /* TBT3 supported by the CM */ 290 val |= ROUTER_CS_5_C3S; 291 /* Tunneling configuration is ready now */ 292 val |= ROUTER_CS_5_CV; 293 294 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 295 if (ret) 296 return ret; 297 298 return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, 299 ROUTER_CS_6_CR, 50); 300 } 301 302 /** 303 * usb4_switch_read_uid() - Read UID from USB4 router 304 * @sw: USB4 router 305 * @uid: UID is stored here 306 * 307 * Reads 64-bit UID from USB4 router config space. 308 */ 309 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) 310 { 311 return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); 312 } 313 314 static int usb4_switch_drom_read_block(void *data, 315 unsigned int dwaddress, void *buf, 316 size_t dwords) 317 { 318 struct tb_switch *sw = data; 319 u8 status = 0; 320 u32 metadata; 321 int ret; 322 323 metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; 324 metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & 325 USB4_DROM_ADDRESS_MASK; 326 327 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata, 328 &status, NULL, 0, buf, dwords); 329 if (ret) 330 return ret; 331 332 return status ? -EIO : 0; 333 } 334 335 /** 336 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM 337 * @sw: USB4 router 338 * @address: Byte address inside DROM to start reading 339 * @buf: Buffer where the DROM content is stored 340 * @size: Number of bytes to read from DROM 341 * 342 * Uses USB4 router operations to read router DROM. For devices this 343 * should always work but for hosts it may return %-EOPNOTSUPP in which 344 * case the host router does not have DROM. 345 */ 346 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, 347 size_t size) 348 { 349 return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, 350 usb4_switch_drom_read_block, sw); 351 } 352 353 /** 354 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding 355 * @sw: USB4 router 356 * 357 * Checks whether conditions are met so that lane bonding can be 358 * established with the upstream router. Call only for device routers. 359 */ 360 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) 361 { 362 struct tb_port *up; 363 int ret; 364 u32 val; 365 366 up = tb_upstream_port(sw); 367 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); 368 if (ret) 369 return false; 370 371 return !!(val & PORT_CS_18_BE); 372 } 373 374 /** 375 * usb4_switch_set_wake() - Enabled/disable wake 376 * @sw: USB4 router 377 * @flags: Wakeup flags (%0 to disable) 378 * 379 * Enables/disables router to wake up from sleep. 
/**
 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 * @sw: USB4 router
 *
 * Checks whether conditions are met so that lane bonding can be
 * established with the upstream router. Call only for device routers.
 */
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int ret;
	u32 val;

	up = tb_upstream_port(sw);
	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_BE);
}

/**
 * usb4_switch_set_wake() - Enable/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 *
 * Enables/disables the router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct usb4_port *usb4;
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;
			usb4 = port->usb4;

			if (((flags & TB_WAKE_ON_CONNECT) |
			     device_may_wakeup(&usb4->dev)) && !configured)
				val |= PORT_CS_19_WOC;
			if (((flags & TB_WAKE_ON_DISCONNECT) |
			     device_may_wakeup(&usb4->dev)) && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
				      ROUTER_CS_6_SLPR, 500);
}
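/*
 * Example (illustrative sketch, not part of the driver): a connection
 * manager would typically pair the two calls above when putting a
 * router into sleep:
 *
 *	ret = usb4_switch_set_wake(sw, TB_WAKE_ON_USB4 |
 *				   TB_WAKE_ON_CONNECT |
 *				   TB_WAKE_ON_DISCONNECT);
 *	if (!ret)
 *		ret = usb4_switch_set_sleep(sw);
 */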
/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		    USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}

/**
 * usb4_switch_nvm_set_offset() - Set NVM write offset
 * @sw: USB4 router
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this
 * is done automatically by usb4_switch_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
			     &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}
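/*
 * Example (illustrative sketch, not part of the driver): a full router
 * NVM upgrade using the helpers above looks roughly like this, assuming
 * @image holds the new NVM image:
 *
 *	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
 *	if (ret)
 *		return ret;
 *	ret = usb4_switch_nvm_authenticate(sw);
 *
 * The router then disconnects and power cycles; the result is read back
 * with usb4_switch_nvm_authenticate_status() (below) once the router is
 * enumerated again.
 */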
/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers the NVM authentication process. The router gets
 * power cycled and if the authentication is successful the new NVM
 * starts running. In case of failure returns negative errno.
 *
 * The caller should call usb4_switch_nvm_authenticate_status() to read
 * the status of the authentication after power cycle. It should be the
 * first router operation to avoid the status being lost.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
	switch (ret) {
	/*
	 * The router is power cycled once NVM_AUTH is started so it is
	 * expected to get any of the following errors back.
	 */
	case -EACCES:
	case -ENOTCONN:
	case -ETIMEDOUT:
		return 0;

	default:
		return ret;
	}
}

/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		*status = 0;
	}

	return 0;
}
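/*
 * Example (illustrative sketch, not part of the driver): on the first
 * enumeration after usb4_switch_nvm_authenticate() the status read
 * should be the very first router operation, e.g.:
 *
 *	u32 auth_status;
 *
 *	ret = usb4_switch_nvm_authenticate_status(sw, &auth_status);
 *	if (!ret && auth_status)
 *		tb_sw_warn(sw, "NVM authentication failed %#x\n",
 *			   auth_status);
 */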
/**
 * usb4_switch_credits_init() - Read buffer allocation parameters
 * @sw: USB4 router
 *
 * Reads @sw buffer allocation parameters and initializes @sw buffer
 * allocation fields accordingly. Specifically @sw->credit_allocation
 * is set to %true if these parameters can be used in tunneling.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_switch_credits_init(struct tb_switch *sw)
{
	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
	int ret, length, i, nports;
	const struct tb_port *port;
	u32 data[NVM_DATA_DWORDS];
	u32 metadata = 0;
	u8 status = 0;

	memset(data, 0, sizeof(data));
	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
				  &status, NULL, 0, data, ARRAY_SIZE(data));
	if (ret)
		return ret;
	if (status)
		return -EIO;

	length = metadata & USB4_BA_LENGTH_MASK;
	if (WARN_ON(length > ARRAY_SIZE(data)))
		return -EMSGSIZE;

	max_usb3 = -1;
	min_dp_aux = -1;
	min_dp_main = -1;
	max_pcie = -1;
	max_dma = -1;

	tb_sw_dbg(sw, "credit allocation parameters:\n");

	for (i = 0; i < length; i++) {
		u16 index, value;

		index = data[i] & USB4_BA_INDEX_MASK;
		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;

		switch (index) {
		case USB4_BA_MAX_USB3:
			tb_sw_dbg(sw, " USB3: %u\n", value);
			max_usb3 = value;
			break;
		case USB4_BA_MIN_DP_AUX:
			tb_sw_dbg(sw, " DP AUX: %u\n", value);
			min_dp_aux = value;
			break;
		case USB4_BA_MIN_DP_MAIN:
			tb_sw_dbg(sw, " DP main: %u\n", value);
			min_dp_main = value;
			break;
		case USB4_BA_MAX_PCIE:
			tb_sw_dbg(sw, " PCIe: %u\n", value);
			max_pcie = value;
			break;
		case USB4_BA_MAX_HI:
			tb_sw_dbg(sw, " DMA: %u\n", value);
			max_dma = value;
			break;
		default:
			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
				  index);
			break;
		}
	}

	/*
	 * Validate the buffer allocation preferences. If we find
	 * issues, log a warning and fall back to using the hard-coded
	 * values.
	 */

	/* Host router must report baMaxHI */
	if (!tb_route(sw) && max_dma < 0) {
		tb_sw_warn(sw, "host router is missing baMaxHI\n");
		goto err_invalid;
	}

	nports = 0;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port))
			nports++;
	}

	/* Must have DP buffer allocation (multiple USB4 ports) */
	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
		goto err_invalid;
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpout(port) && min_dp_main < 0) {
			tb_sw_warn(sw, "missing baMinDPmain");
			goto err_invalid;
		}
		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
		    min_dp_aux < 0) {
			tb_sw_warn(sw, "missing baMinDPaux");
			goto err_invalid;
		}
		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
		    max_usb3 < 0) {
			tb_sw_warn(sw, "missing baMaxUSB3");
			goto err_invalid;
		}
		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
		    max_pcie < 0) {
			tb_sw_warn(sw, "missing baMaxPCIe");
			goto err_invalid;
		}
	}

	/*
	 * Buffer allocation passed the validation so we can use it in
	 * path creation.
	 */
	sw->credit_allocation = true;
	if (max_usb3 > 0)
		sw->max_usb3_credits = max_usb3;
	if (min_dp_aux > 0)
		sw->min_dp_aux_credits = min_dp_aux;
	if (min_dp_main > 0)
		sw->min_dp_main_credits = min_dp_main;
	if (max_pcie > 0)
		sw->max_pcie_credits = max_pcie;
	if (max_dma > 0)
		sw->max_dma_credits = max_dma;

	return 0;

err_invalid:
	return -EINVAL;
}
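/*
 * Example (illustrative sketch, not part of the driver): path setup
 * code is expected to honor these fields only when the router reported
 * usable parameters:
 *
 *	if (sw->credit_allocation)
 *		credits = sw->max_pcie_credits;
 *	else
 *		credits = TB_PCIE_CREDITS;	// hypothetical fallback
 */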
/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
			     &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	if (ret)
		return false;

	return !status;
}

/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	if (ret)
		return ret;

	return status ? -EBUSY : 0;
}

/**
 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Releases the previously allocated DP IN resource.
 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	if (ret)
		return ret;

	return status ? -EIO : 0;
}
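/*
 * Example (illustrative sketch, not part of the driver): DP tunnel
 * setup pairs these operations, assuming @in is the DP IN adapter:
 *
 *	if (!usb4_switch_query_dp_resource(sw, in))
 *		return -EBUSY;			// already in use
 *	ret = usb4_switch_alloc_dp_resource(sw, in);
 *	...
 *	usb4_switch_dealloc_dp_resource(sw, in);	// on teardown
 */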
static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}

/**
 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and PCIe
 * downstream adapters where the PCIe topology is extended. This
 * function returns the corresponding downstream PCIe adapter or %NULL
 * if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int pcie_idx = 0;

	/* Find PCIe down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_pcie_down(p))
			continue;

		if (pcie_idx == usb4_idx)
			return p;

		pcie_idx++;
	}

	return NULL;
}

/**
 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 * downstream adapters where the USB 3.x topology is extended. This
 * function returns the corresponding downstream USB 3.x adapter or
 * %NULL if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int usb_idx = 0;

	/* Find USB3 down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_usb3_down(p))
			continue;

		if (usb_idx == usb4_idx)
			return p;

		usb_idx++;
	}

	return NULL;
}
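/*
 * Example (illustrative sketch, not part of the driver): when the PCIe
 * topology is extended through @port, tunnel setup looks up the
 * matching downstream adapter with:
 *
 *	struct tb_port *down;
 *
 *	down = usb4_switch_map_pcie_down(sw, port);
 *	if (!down)
 *		return -ENODEV;	// no PCIe adapter behind this USB4 port
 */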
/**
 * usb4_switch_add_ports() - Add USB4 ports for this router
 * @sw: USB4 router
 *
 * For a USB4 router this finds all USB4 ports and registers a device
 * for each. Can be called for any router.
 *
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_switch_add_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		struct usb4_port *usb4;

		if (!tb_port_is_null(port))
			continue;
		if (!port->cap_usb4)
			continue;

		usb4 = usb4_port_device_add(port);
		if (IS_ERR(usb4)) {
			usb4_switch_remove_ports(sw);
			return PTR_ERR(usb4);
		}

		port->usb4 = usb4;
	}

	return 0;
}

/**
 * usb4_switch_remove_ports() - Removes USB4 ports from this router
 * @sw: USB4 router
 *
 * Unregisters previously registered USB4 ports.
 */
void usb4_switch_remove_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->usb4) {
			usb4_port_device_remove(port->usb4);
			port->usb4 = NULL;
		}
	}
}

/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * usb4_port_hotplug_enable() - Enables hotplug for a port
 * @port: USB4 port to operate on
 *
 * Enables hot plug events on a given port. This is only intended
 * to be used on lane, DP-IN, and DP-OUT adapters.
 */
int usb4_port_hotplug_enable(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	val &= ~ADP_CS_5_DHP;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}

static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port
 *
 * Sets the USB4 link to be configured for power management purposes.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}

/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}
static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PID;
	else
		val &= ~PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 * @xd: XDomain that is connected to the port
 *
 * Marks the USB4 port as being connected to another host and updates
 * the link type. Returns %0 in success and negative errno in failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	xd->link_usb4 = link_is_usb4(port);
	return usb4_set_xdomain_configured(port, true);
}

/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}

static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}

static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}

static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}

static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
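/*
 * Example (illustrative sketch, not part of the driver): a complete
 * sideband transaction combines the two helpers above. The opcode
 * helpers that follow build on them, typically writing metadata first,
 * then triggering an opcode and finally reading the result back:
 *
 *	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
 *				 USB4_SB_METADATA, &val, sizeof(val));
 *	...
 *	ret = usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
 *				USB4_SB_DATA, results, sizeof(results));
 */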
static int usb4_port_sb_opcode_err_to_errno(u32 val)
{
	switch (val) {
	case 0:
		return 0;
	case USB4_SB_OPCODE_ERR:
		return -EAGAIN;
	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}

static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		if (val != opcode)
			return usb4_port_sb_opcode_err_to_errno(val);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val = !offline;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_router_offline() - Put the USB4 port to offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react to hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 link is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}

/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again.
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}
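/*
 * Example (illustrative sketch, not part of the driver): accessing
 * on-board retimers when nothing is connected to the port uses the
 * offline mode, roughly:
 *
 *	ret = usb4_port_router_offline(port);
 *	if (ret)
 *		return ret;
 *	ret = usb4_port_enumerate_retimers(port);
 *	...				// sideband access to the retimers
 *	usb4_port_router_online(port);
 */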
/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send a broadcast RT transaction which
 * makes the retimers on the link assign indices to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_clx_supported() - Check if CLx is supported by the link
 * @port: Port to check for CLx support for
 *
 * PORT_CS_18_CPS bit reflects if the link supports CLx including
 * active cables (if connected on the link).
 */
bool usb4_port_clx_supported(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_CPS);
}

/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
 * @port: USB4 port
 * @caps: Array with at least two elements to hold the results
 *
 * Reads the USB4 port lane margining capabilities into @caps.
 */
int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, caps, sizeof(*caps) * 2);
}

/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @ber_level: BER level contour value
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @results: Array with at least two elements to hold the results
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
			unsigned int ber_level, bool timing, bool right_high,
			u32 *results)
{
	u32 val;
	int ret;

	val = lanes;
	if (timing)
		val |= USB4_MARGIN_HW_TIME;
	if (right_high)
		val |= USB4_MARGIN_HW_RH;
	if (ber_level)
		val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
			USB4_MARGIN_HW_BER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, results, sizeof(*results) * 2);
}
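/*
 * Example (illustrative sketch, not part of the driver): a caller such
 * as the debugfs margining code might drive this roughly as follows,
 * assuming lane 0, timing margining on the right/high side:
 *
 *	u32 caps[2], results[2];
 *
 *	ret = usb4_port_margining_caps(port, caps);
 *	if (ret)
 *		return ret;
 *	ret = usb4_port_hw_margin(port, 0, 0, true, true, results);
 */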
/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @counter: What to do with the error counter
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
			bool right_high, u32 counter)
{
	u32 val;
	int ret;

	val = lanes;
	if (timing)
		val |= USB4_MARGIN_SW_TIME;
	if (right_high)
		val |= USB4_MARGIN_SW_RH;
	val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
		USB4_MARGIN_SW_COUNTER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			       USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
}

/**
 * usb4_port_sw_margin_errors() - Read the software margining error counters
 * @port: USB4 port
 * @errors: Error metadata is copied here.
 *
 * This reads back the software margining error counters from the port.
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, errors, sizeof(*errors));
}

static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}

/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when the
 * USB4 link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}

/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}
/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}

/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}

/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last one (connected directly to the
 * Type-C port) this function returns %1. If it is not returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}

struct retimer_info {
	struct tb_port *port;
	u8 index;
};

static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)

{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}

/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
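/*
 * Example (illustrative sketch, not part of the driver): a retimer NVM
 * upgrade ties the pieces together, assuming @image holds the image:
 *
 *	ret = usb4_port_retimer_set_inbound_sbtx(port, index);
 *	...
 *	ret = usb4_port_retimer_nvm_write(port, index, 0, image, size);
 *	if (!ret)
 *		ret = usb4_port_retimer_nvm_authenticate(port, index);
 *	...
 *	usb4_port_enumerate_retimers(port);
 *	usb4_port_retimer_nvm_authenticate_status(port, index, &status);
 */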
/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_opcode_err_to_errno(val);
	switch (ret) {
	case 0:
		*status = 0;
		return 0;

	case -EAGAIN:
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	default:
		return ret;
	}
}

static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	if (dwords < NVM_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}

/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}

static inline unsigned int
usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
{
	/* Take the possible bandwidth limitation into account */
	if (port->max_bw)
		return min(bw, port->max_bw);
	return bw;
}

/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}
/**
 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
 * @port: USB3 adapter port
 *
 * Return actual established link rate of a USB3 adapter in Mb/s. If the
 * link is not up returns %0 and negative errno in case of failure.
 */
int usb4_usb3_port_actual_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	if (!(val & ADP_USB3_CS_4_ULV))
		return 0;

	lr = val & ADP_USB3_CS_4_ALR_MASK;
	ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}

static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}

static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
	unsigned long uframes;

	uframes = bw * 512UL << scale;
	return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
}

static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
	unsigned long uframes;

	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
	uframes = ((unsigned long)mbps * MEGA) / 8000;
	return DIV_ROUND_UP(uframes, 512UL << scale);
}
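/*
 * Example (worked numbers, not part of the driver): with scale = 0 one
 * bandwidth unit is 512 bits per microframe, i.e. 512 * 8000 / 1000000
 * = 4.096 Mb/s. 10000 Mb/s thus rounds up to 2442 units, which still
 * fits the 12-bit register field (< 4096). Representing 20 Gb/s needs
 * scale = 1 where one unit is 8.192 Mb/s.
 */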
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
        int ret;
        u32 val;

        if (!tb_port_is_usb3_down(port))
                return -EINVAL;
        if (tb_route(port->sw))
                return -EINVAL;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        if (request)
                val |= ADP_USB3_CS_2_CMR;
        else
                val &= ~ADP_USB3_CS_2_CMR;

        ret = tb_port_write(port, &val, TB_CFG_PORT,
                            port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        /*
         * We can use val here directly as the CMR bit is in the same place
         * as HCA. Just mask out others.
         */
        val &= ADP_USB3_CS_2_CMR;
        return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
                                      ADP_USB3_CS_1_HCA, val, 1500);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
        return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
        return usb4_usb3_port_cm_request(port, false);
}

static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
        unsigned long uframes;

        uframes = bw * 512UL << scale;
        return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
}

static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
        unsigned long uframes;

        /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
        uframes = ((unsigned long)mbps * MEGA) / 8000;
        return DIV_ROUND_UP(uframes, 512UL << scale);
}
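/*
 * Worked example of the conversion above, derived from the code and
 * added for clarity: as the helpers read, the register value counts
 * units of 512 << scale bits per 125 us microframe, and there are
 * 8000 microframes per second, so with scale = 0 one unit is about
 * 512 * 8000 / 1000000 = 4.1 Mb/s. For instance:
 *
 *      mbps_to_usb3_bw(900, 0)
 *              = DIV_ROUND_UP(900 * 1000000 / 8000, 512) = 220
 *      usb3_bw_to_mbps(220, 0)
 *              = DIV_ROUND_CLOSEST(220 * 512 * 8000, 1000000) = 901
 *
 * i.e. the round trip overshoots the request by at most one unit.
 */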
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
                                                   int *upstream_bw,
                                                   int *downstream_bw)
{
        u32 val, bw, scale;
        int ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        ret = tb_port_read(port, &scale, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_3, 1);
        if (ret)
                return ret;

        scale &= ADP_USB3_CS_3_SCALE_MASK;

        bw = val & ADP_USB3_CS_2_AUBW_MASK;
        *upstream_bw = usb3_bw_to_mbps(bw, scale);

        bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
        *downstream_bw = usb3_bw_to_mbps(bw, scale);

        return 0;
}

/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in case of failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
                                       int *downstream_bw)
{
        int ret;

        ret = usb4_usb3_port_set_cm_request(port);
        if (ret)
                return ret;

        ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
                                                      downstream_bw);
        usb4_usb3_port_clear_cm_request(port);

        return ret;
}

static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
                                                  int *upstream_bw,
                                                  int *downstream_bw)
{
        u32 val, bw, scale;
        int ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_1, 1);
        if (ret)
                return ret;

        ret = tb_port_read(port, &scale, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_3, 1);
        if (ret)
                return ret;

        scale &= ADP_USB3_CS_3_SCALE_MASK;

        bw = val & ADP_USB3_CS_1_CUBW_MASK;
        *upstream_bw = usb3_bw_to_mbps(bw, scale);

        bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
        *downstream_bw = usb3_bw_to_mbps(bw, scale);

        return 0;
}

static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
                                                    int upstream_bw,
                                                    int downstream_bw)
{
        u32 val, ubw, dbw, scale;
        int ret, max_bw;

        /* Figure out suitable scale */
        scale = 0;
        max_bw = max(upstream_bw, downstream_bw);
        while (scale < 64) {
                if (mbps_to_usb3_bw(max_bw, scale) < 4096)
                        break;
                scale++;
        }

        if (WARN_ON(scale >= 64))
                return -EINVAL;

        ret = tb_port_write(port, &scale, TB_CFG_PORT,
                            port->cap_adap + ADP_USB3_CS_3, 1);
        if (ret)
                return ret;

        ubw = mbps_to_usb3_bw(upstream_bw, scale);
        dbw = mbps_to_usb3_bw(downstream_bw, scale);

        tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
        val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
        val |= ubw;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_USB3_CS_2, 1);
}

/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
                                      int *downstream_bw)
{
        int ret, consumed_up, consumed_down, allocate_up, allocate_down;

        ret = usb4_usb3_port_set_cm_request(port);
        if (ret)
                return ret;

        ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
                                                     &consumed_down);
        if (ret)
                goto err_request;

        /* Don't allow it to go lower than what is consumed */
        allocate_up = max(*upstream_bw, consumed_up);
        allocate_down = max(*downstream_bw, consumed_down);

        ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
                                                       allocate_down);
        if (ret)
                goto err_request;

        *upstream_bw = allocate_up;
        *downstream_bw = allocate_down;

err_request:
        usb4_usb3_port_clear_cm_request(port);
        return ret;
}
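/*
 * Sketch of how a connection manager might use the call above; the
 * 2000/1000 Mb/s figures stand in for whatever policy the CM applies
 * and are invented for the example:
 *
 *      int up = 2000, down = 1000, ret;
 *
 *      ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
 *      if (ret)
 *              return ret;
 *      // up/down now hold what was actually programmed, which may be
 *      // higher than requested if that much was already consumed
 */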
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
                                     int *downstream_bw)
{
        int ret, consumed_up, consumed_down;

        ret = usb4_usb3_port_set_cm_request(port);
        if (ret)
                return ret;

        ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
                                                     &consumed_down);
        if (ret)
                goto err_request;

        /*
         * Always keep 1000 Mb/s to make sure xHCI has at least some
         * bandwidth available for isochronous traffic.
         */
        if (consumed_up < 1000)
                consumed_up = 1000;
        if (consumed_down < 1000)
                consumed_down = 1000;

        ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
                                                       consumed_down);
        if (ret)
                goto err_request;

        *upstream_bw = consumed_up;
        *downstream_bw = consumed_down;

err_request:
        usb4_usb3_port_clear_cm_request(port);
        return ret;
}

static bool is_usb4_dpin(const struct tb_port *port)
{
        if (!tb_port_is_dpin(port))
                return false;
        if (!tb_switch_is_usb4(port->sw))
                return false;
        return true;
}

/**
 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
 * @port: DP IN adapter
 * @cm_id: CM ID to assign
 *
 * Sets CM ID for the @port. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
 * support this.
 */
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_CM_ID_MASK;
        val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
 * @port: DP IN adapter to check
 *
 * Can be called for any DP IN adapter. Returns true if the adapter
 * supports USB4 bandwidth allocation mode, false otherwise.
 */
bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
{
        int ret;
        u32 val;

        if (!is_usb4_dpin(port))
                return false;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return false;

        return !!(val & DP_COMMON_CAP_BW_MODE);
}

/**
 * usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
 * @port: DP IN adapter to check
 *
 * Can be called for any DP IN adapter. Returns true if the bandwidth
 * allocation mode has been enabled, false otherwise.
 */
bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
{
        int ret;
        u32 val;

        if (!is_usb4_dpin(port))
                return false;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_8, 1);
        if (ret)
                return false;

        return !!(val & ADP_DP_CS_8_DPME);
}

/**
 * usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
 * @port: DP IN adapter
 * @supported: Does the CM support bandwidth allocation mode
 *
 * Can be called for any DP IN adapter. Sets or clears the CM support bit
 * of the DP IN adapter. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
 * does not support this.
 */
int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        if (supported)
                val |= ADP_DP_CS_2_CMMS;
        else
                val &= ~ADP_DP_CS_2_CMMS;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}
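/*
 * Putting the three helpers above together: a CM that wants bandwidth
 * allocation mode could probe and advertise it roughly like this. The
 * ordering is an illustration, not a sequence this file spells out:
 *
 *      bool enabled;
 *      int ret;
 *
 *      if (!usb4_dp_port_bw_mode_supported(port))
 *              return -EOPNOTSUPP;
 *
 *      ret = usb4_dp_port_set_cm_bw_mode_supported(port, true);
 *      if (ret)
 *              return ret;
 *
 *      // the DPTX side enables the mode; check whether it took effect
 *      enabled = usb4_dp_port_bw_mode_enabled(port);
 */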
/**
 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
 * @port: DP IN adapter
 *
 * Reads bandwidth allocation Group ID from the DP IN adapter and
 * returns it. If the adapter does not support setting Group ID,
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_group_id(struct tb_port *port)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
}

/**
 * usb4_dp_port_set_group_id() - Set adapter Group ID
 * @port: DP IN adapter
 * @group_id: Group ID for the adapter
 *
 * Sets bandwidth allocation mode Group ID for the DP IN adapter.
 * Returns %0 in case of success and negative errno otherwise.
 * Specifically returns %-EOPNOTSUPP if the adapter does not support
 * this.
 */
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
        val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s is placed here
 * @lanes: Non-reduced lanes are placed here
 *
 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
 * %0 on success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 */
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
        u32 val, tmp;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
        switch (tmp) {
        case DP_COMMON_CAP_RATE_RBR:
                *rate = 1620;
                break;
        case DP_COMMON_CAP_RATE_HBR:
                *rate = 2700;
                break;
        case DP_COMMON_CAP_RATE_HBR2:
                *rate = 5400;
                break;
        case DP_COMMON_CAP_RATE_HBR3:
                *rate = 8100;
                break;
        }

        tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
        switch (tmp) {
        case DP_COMMON_CAP_1_LANE:
                *lanes = 1;
                break;
        case DP_COMMON_CAP_2_LANES:
                *lanes = 2;
                break;
        case DP_COMMON_CAP_4_LANES:
                *lanes = 4;
                break;
        }

        return 0;
}
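/*
 * For reference, the raw link bandwidth implied by the values read
 * above is rate * lanes; the RBR..HBR3 rates use 8b/10b channel
 * coding, so the usable payload is 80% of that. A hypothetical caller
 * (not part of this file) could compute it like this:
 *
 *      int rate, lanes, bw, ret;
 *
 *      ret = usb4_dp_port_nrd(port, &rate, &lanes);
 *      if (ret)
 *              return ret;
 *      // e.g. HBR2 x4: 5400 * 4 * 8 / 10 = 17280 Mb/s of payload
 *      bw = rate * lanes * 8 / 10;
 */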
/**
 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s
 * @lanes: Non-reduced lanes
 *
 * Before the capabilities reduction this function can be used to set
 * the non-reduced values for the DP IN adapter. Returns %0 on success
 * and negative errno otherwise. If the adapter does not support this
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_NRD_MLR_MASK;

        switch (rate) {
        case 1620:
                break;
        case 2700:
                val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
                        & ADP_DP_CS_2_NRD_MLR_MASK;
                break;
        case 5400:
                val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
                        & ADP_DP_CS_2_NRD_MLR_MASK;
                break;
        case 8100:
                val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
                        & ADP_DP_CS_2_NRD_MLR_MASK;
                break;
        default:
                return -EINVAL;
        }

        val &= ~ADP_DP_CS_2_NRD_MLC_MASK;

        switch (lanes) {
        case 1:
                break;
        case 2:
                val |= DP_COMMON_CAP_2_LANES;
                break;
        case 4:
                val |= DP_COMMON_CAP_4_LANES;
                break;
        default:
                return -EINVAL;
        }

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_granularity() - Return granularity for the bandwidth values
 * @port: DP IN adapter
 *
 * Reads the programmed granularity from @port. If the DP IN adapter does
 * not support bandwidth allocation mode, returns %-EOPNOTSUPP, and
 * negative errno in other error cases.
 */
int usb4_dp_port_granularity(struct tb_port *port)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ADP_DP_CS_2_GR_MASK;
        val >>= ADP_DP_CS_2_GR_SHIFT;

        switch (val) {
        case ADP_DP_CS_2_GR_0_25G:
                return 250;
        case ADP_DP_CS_2_GR_0_5G:
                return 500;
        case ADP_DP_CS_2_GR_1G:
                return 1000;
        }

        return -EINVAL;
}

/**
 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
 * @port: DP IN adapter
 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
 *
 * Sets the granularity used with the estimated, allocated and requested
 * bandwidth. Returns %0 on success and negative errno otherwise. If the
 * adapter does not support this %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_GR_MASK;

        switch (granularity) {
        case 250:
                val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
                break;
        case 500:
                val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
                break;
        case 1000:
                val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
                break;
        default:
                return -EINVAL;
        }

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}
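/*
 * The estimated, allocated and requested values below are all
 * programmed in units of this granularity. Worked example (numbers
 * invented for illustration): with granularity 250, a 4500 Mb/s value
 * is written as 4500 / 250 = 18 units and reads back as exactly
 * 4500 Mb/s, whereas 4600 Mb/s rounds down to the same 18 units:
 *
 *      ret = usb4_dp_port_set_granularity(port, 250);
 *      if (ret)
 *              return ret;
 */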
/**
 * usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
 * @port: DP IN adapter
 * @bw: Estimated bandwidth in Mb/s.
 *
 * Sets the estimated bandwidth to @bw. Set the granularity by calling
 * usb4_dp_port_set_granularity() before calling this. The @bw is
 * rounded down to the closest granularity multiple. Returns %0 on
 * success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 */
int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
        val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocated_bw() - Return allocated bandwidth
 * @port: DP IN adapter
 *
 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
 * account the programmed granularity). Returns negative errno in case
 * of error.
 */
int usb4_dp_port_allocated_bw(struct tb_port *port)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + DP_STATUS, 1);
        if (ret)
                return ret;

        val &= DP_STATUS_ALLOCATED_BW_MASK;
        val >>= DP_STATUS_ALLOCATED_BW_SHIFT;

        return val * granularity;
}
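/*
 * Typical estimated-bandwidth update as a sketch; how the CM arrives
 * at the 8000 Mb/s figure (e.g. free bandwidth left in the group) is
 * outside this file and invented for the example:
 *
 *      ret = usb4_dp_port_set_estimated_bw(port, 8000);
 *      if (ret)
 *              return ret;
 *      // with granularity 250 this programs 8000 / 250 = 32 units
 */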
static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
{
        u32 val;
        int ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        if (ack)
                val |= ADP_DP_CS_2_CA;
        else
                val &= ~ADP_DP_CS_2_CA;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
        return __usb4_dp_port_set_cm_ack(port, true);
}

static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
                                              int timeout_msec)
{
        ktime_t end;
        u32 val;
        int ret;

        ret = __usb4_dp_port_set_cm_ack(port, false);
        if (ret)
                return ret;

        end = ktime_add_ms(ktime_get(), timeout_msec);
        do {
                ret = tb_port_read(port, &val, TB_CFG_PORT,
                                   port->cap_adap + ADP_DP_CS_8, 1);
                if (ret)
                        return ret;

                if (!(val & ADP_DP_CS_8_DR))
                        break;

                usleep_range(50, 100);
        } while (ktime_before(ktime_get(), end));

        if (val & ADP_DP_CS_8_DR)
                return -ETIMEDOUT;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_CA;
        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocate_bw() - Set allocated bandwidth
 * @port: DP IN adapter
 * @bw: New allocated bandwidth in Mb/s
 *
 * Communicates the new allocated bandwidth with the DPCD (graphics
 * driver). Takes into account the programmed granularity. Returns %0
 * on success and negative errno in case of error.
 */
int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + DP_STATUS, 1);
        if (ret)
                return ret;

        val &= ~DP_STATUS_ALLOCATED_BW_MASK;
        val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;

        ret = tb_port_write(port, &val, TB_CFG_PORT,
                            port->cap_adap + DP_STATUS, 1);
        if (ret)
                return ret;

        ret = usb4_dp_port_set_cm_ack(port);
        if (ret)
                return ret;

        return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}

/**
 * usb4_dp_port_requested_bw() - Read requested bandwidth
 * @port: DP IN adapter
 *
 * Reads the DPCD (graphics driver) requested bandwidth and returns it
 * in Mb/s. Takes the programmed granularity into account. In case of
 * error returns negative errno. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support bandwidth allocation mode, and
 * %-ENODATA if there is no active bandwidth request from the graphics
 * driver.
 */
int usb4_dp_port_requested_bw(struct tb_port *port)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_8, 1);
        if (ret)
                return ret;

        if (!(val & ADP_DP_CS_8_DR))
                return -ENODATA;

        return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
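/*
 * End-to-end sketch of the request/allocate handshake from the CM
 * side. The notification context and the grant-as-is policy are
 * illustrative; only the two helpers are real:
 *
 *      // in a hypothetical DP bandwidth-request notification handler:
 *      int req = usb4_dp_port_requested_bw(port);
 *
 *      if (req == -ENODATA)
 *              return 0;       // nothing pending
 *      if (req < 0)
 *              return req;
 *
 *      // grant the request (a real CM would check the group budget
 *      // first and possibly grant less)
 *      return usb4_dp_port_allocate_bw(port, req);
 */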