1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Internal Thunderbolt Connection Manager. This is a firmware running on 4 * the Thunderbolt host controller performing most of the low-level 5 * handling. 6 * 7 * Copyright (C) 2017, Intel Corporation 8 * Authors: Michael Jamet <michael.jamet@intel.com> 9 * Mika Westerberg <mika.westerberg@linux.intel.com> 10 */ 11 12 #include <linux/delay.h> 13 #include <linux/mutex.h> 14 #include <linux/moduleparam.h> 15 #include <linux/pci.h> 16 #include <linux/pm_runtime.h> 17 #include <linux/platform_data/x86/apple.h> 18 #include <linux/sizes.h> 19 #include <linux/slab.h> 20 #include <linux/workqueue.h> 21 22 #include "ctl.h" 23 #include "nhi_regs.h" 24 #include "tb.h" 25 26 #define PCIE2CIO_CMD 0x30 27 #define PCIE2CIO_CMD_TIMEOUT BIT(31) 28 #define PCIE2CIO_CMD_START BIT(30) 29 #define PCIE2CIO_CMD_WRITE BIT(21) 30 #define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19) 31 #define PCIE2CIO_CMD_CS_SHIFT 19 32 #define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13) 33 #define PCIE2CIO_CMD_PORT_SHIFT 13 34 35 #define PCIE2CIO_WRDATA 0x34 36 #define PCIE2CIO_RDDATA 0x38 37 38 #define PHY_PORT_CS1 0x37 39 #define PHY_PORT_CS1_LINK_DISABLE BIT(14) 40 #define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26) 41 #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 42 43 #define ICM_TIMEOUT 5000 /* ms */ 44 #define ICM_APPROVE_TIMEOUT 10000 /* ms */ 45 #define ICM_MAX_LINK 4 46 47 static bool start_icm; 48 module_param(start_icm, bool, 0444); 49 MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)"); 50 51 /** 52 * struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status 53 * @reply: Reply from ICM firmware is placed here 54 * @request: Request that is sent to ICM firmware 55 * @icm: Pointer to ICM private data 56 */ 57 struct usb4_switch_nvm_auth { 58 struct icm_usb4_switch_op_response reply; 59 struct icm_usb4_switch_op request; 60 struct icm *icm; 61 }; 62 63 /** 64 * struct icm - Internal connection manager private data 65 * @request_lock: Makes sure only one message is send to ICM at time 66 * @rescan_work: Work used to rescan the surviving switches after resume 67 * @upstream_port: Pointer to the PCIe upstream port this host 68 * controller is connected. This is only set for systems 69 * where ICM needs to be started manually 70 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides 71 * (only set when @upstream_port is not %NULL) 72 * @safe_mode: ICM is in safe mode 73 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) 74 * @rpm: Does the controller support runtime PM (RTD3) 75 * @can_upgrade_nvm: Can the NVM firmware be upgrade on this controller 76 * @proto_version: Firmware protocol version 77 * @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set) 78 * @veto: Is RTD3 veto in effect 79 * @is_supported: Checks if we can support ICM on this controller 80 * @cio_reset: Trigger CIO reset 81 * @get_mode: Read and return the ICM firmware mode (optional) 82 * @get_route: Find a route string for given switch 83 * @save_devices: Ask ICM to save devices to ACL when suspending (optional) 84 * @driver_ready: Send driver ready message to ICM 85 * @set_uuid: Set UUID for the root switch (optional) 86 * @device_connected: Handle device connected ICM message 87 * @device_disconnected: Handle device disconnected ICM message 88 * @xdomain_connected: Handle XDomain connected ICM message 89 * @xdomain_disconnected: Handle XDomain disconnected ICM message 90 * @rtd3_veto: Handle RTD3 veto notification ICM message 91 */ 92 struct icm { 93 struct mutex request_lock; 94 struct delayed_work rescan_work; 95 struct pci_dev *upstream_port; 96 int vnd_cap; 97 bool safe_mode; 98 size_t max_boot_acl; 99 bool rpm; 100 bool can_upgrade_nvm; 101 u8 proto_version; 102 struct usb4_switch_nvm_auth *last_nvm_auth; 103 bool veto; 104 bool (*is_supported)(struct tb *tb); 105 int (*cio_reset)(struct tb *tb); 106 int (*get_mode)(struct tb *tb); 107 int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); 108 void (*save_devices)(struct tb *tb); 109 int (*driver_ready)(struct tb *tb, 110 enum tb_security_level *security_level, 111 u8 *proto_version, size_t *nboot_acl, bool *rpm); 112 void (*set_uuid)(struct tb *tb); 113 void (*device_connected)(struct tb *tb, 114 const struct icm_pkg_header *hdr); 115 void (*device_disconnected)(struct tb *tb, 116 const struct icm_pkg_header *hdr); 117 void (*xdomain_connected)(struct tb *tb, 118 const struct icm_pkg_header *hdr); 119 void (*xdomain_disconnected)(struct tb *tb, 120 const struct icm_pkg_header *hdr); 121 void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr); 122 }; 123 124 struct icm_notification { 125 struct work_struct work; 126 struct icm_pkg_header *pkg; 127 struct tb *tb; 128 }; 129 130 struct ep_name_entry { 131 u8 len; 132 u8 type; 133 u8 data[]; 134 }; 135 136 #define EP_NAME_INTEL_VSS 0x10 137 138 /* Intel Vendor specific structure */ 139 struct intel_vss { 140 u16 vendor; 141 u16 model; 142 u8 mc; 143 u8 flags; 144 u16 pci_devid; 145 u32 nvm_version; 146 }; 147 148 #define INTEL_VSS_FLAGS_RTD3 BIT(0) 149 150 static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size) 151 { 152 const void *end = ep_name + size; 153 154 while (ep_name < end) { 155 const struct ep_name_entry *ep = ep_name; 156 157 if (!ep->len) 158 break; 159 if (ep_name + ep->len > end) 160 break; 161 162 if (ep->type == EP_NAME_INTEL_VSS) 163 return (const struct intel_vss *)ep->data; 164 165 ep_name += ep->len; 166 } 167 168 return NULL; 169 } 170 171 static bool intel_vss_is_rtd3(const void *ep_name, size_t size) 172 { 173 const struct intel_vss *vss; 174 175 vss = parse_intel_vss(ep_name, size); 176 if (vss) 177 return !!(vss->flags & INTEL_VSS_FLAGS_RTD3); 178 179 return false; 180 } 181 182 static inline struct tb *icm_to_tb(struct icm *icm) 183 { 184 return ((void *)icm - sizeof(struct tb)); 185 } 186 187 static inline u8 phy_port_from_route(u64 route, u8 depth) 188 { 189 u8 link; 190 191 link = depth ? route >> ((depth - 1) * 8) : route; 192 return tb_phy_port_from_link(link); 193 } 194 195 static inline u8 dual_link_from_link(u8 link) 196 { 197 return link ? ((link - 1) ^ 0x01) + 1 : 0; 198 } 199 200 static inline u64 get_route(u32 route_hi, u32 route_lo) 201 { 202 return (u64)route_hi << 32 | route_lo; 203 } 204 205 static inline u64 get_parent_route(u64 route) 206 { 207 int depth = tb_route_length(route); 208 return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; 209 } 210 211 static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) 212 { 213 unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); 214 u32 cmd; 215 216 do { 217 pci_read_config_dword(icm->upstream_port, 218 icm->vnd_cap + PCIE2CIO_CMD, &cmd); 219 if (!(cmd & PCIE2CIO_CMD_START)) { 220 if (cmd & PCIE2CIO_CMD_TIMEOUT) 221 break; 222 return 0; 223 } 224 225 msleep(50); 226 } while (time_before(jiffies, end)); 227 228 return -ETIMEDOUT; 229 } 230 231 static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, 232 unsigned int port, unsigned int index, u32 *data) 233 { 234 struct pci_dev *pdev = icm->upstream_port; 235 int ret, vnd_cap = icm->vnd_cap; 236 u32 cmd; 237 238 cmd = index; 239 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; 240 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; 241 cmd |= PCIE2CIO_CMD_START; 242 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); 243 244 ret = pci2cio_wait_completion(icm, 5000); 245 if (ret) 246 return ret; 247 248 pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); 249 return 0; 250 } 251 252 static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, 253 unsigned int port, unsigned int index, u32 data) 254 { 255 struct pci_dev *pdev = icm->upstream_port; 256 int vnd_cap = icm->vnd_cap; 257 u32 cmd; 258 259 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); 260 261 cmd = index; 262 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; 263 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; 264 cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; 265 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); 266 267 return pci2cio_wait_completion(icm, 5000); 268 } 269 270 static bool icm_match(const struct tb_cfg_request *req, 271 const struct ctl_pkg *pkg) 272 { 273 const struct icm_pkg_header *res_hdr = pkg->buffer; 274 const struct icm_pkg_header *req_hdr = req->request; 275 276 if (pkg->frame.eof != req->response_type) 277 return false; 278 if (res_hdr->code != req_hdr->code) 279 return false; 280 281 return true; 282 } 283 284 static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) 285 { 286 const struct icm_pkg_header *hdr = pkg->buffer; 287 288 if (hdr->packet_id < req->npackets) { 289 size_t offset = hdr->packet_id * req->response_size; 290 291 memcpy(req->response + offset, pkg->buffer, req->response_size); 292 } 293 294 return hdr->packet_id == hdr->total_packets - 1; 295 } 296 297 static int icm_request(struct tb *tb, const void *request, size_t request_size, 298 void *response, size_t response_size, size_t npackets, 299 unsigned int timeout_msec) 300 { 301 struct icm *icm = tb_priv(tb); 302 int retries = 3; 303 304 do { 305 struct tb_cfg_request *req; 306 struct tb_cfg_result res; 307 308 req = tb_cfg_request_alloc(); 309 if (!req) 310 return -ENOMEM; 311 312 req->match = icm_match; 313 req->copy = icm_copy; 314 req->request = request; 315 req->request_size = request_size; 316 req->request_type = TB_CFG_PKG_ICM_CMD; 317 req->response = response; 318 req->npackets = npackets; 319 req->response_size = response_size; 320 req->response_type = TB_CFG_PKG_ICM_RESP; 321 322 mutex_lock(&icm->request_lock); 323 res = tb_cfg_request_sync(tb->ctl, req, timeout_msec); 324 mutex_unlock(&icm->request_lock); 325 326 tb_cfg_request_put(req); 327 328 if (res.err != -ETIMEDOUT) 329 return res.err == 1 ? -EIO : res.err; 330 331 usleep_range(20, 50); 332 } while (retries--); 333 334 return -ETIMEDOUT; 335 } 336 337 /* 338 * If rescan is queued to run (we are resuming), postpone it to give the 339 * firmware some more time to send device connected notifications for next 340 * devices in the chain. 341 */ 342 static void icm_postpone_rescan(struct tb *tb) 343 { 344 struct icm *icm = tb_priv(tb); 345 346 if (delayed_work_pending(&icm->rescan_work)) 347 mod_delayed_work(tb->wq, &icm->rescan_work, 348 msecs_to_jiffies(500)); 349 } 350 351 static void icm_veto_begin(struct tb *tb) 352 { 353 struct icm *icm = tb_priv(tb); 354 355 if (!icm->veto) { 356 icm->veto = true; 357 /* Keep the domain powered while veto is in effect */ 358 pm_runtime_get(&tb->dev); 359 } 360 } 361 362 static void icm_veto_end(struct tb *tb) 363 { 364 struct icm *icm = tb_priv(tb); 365 366 if (icm->veto) { 367 icm->veto = false; 368 /* Allow the domain suspend now */ 369 pm_runtime_mark_last_busy(&tb->dev); 370 pm_runtime_put_autosuspend(&tb->dev); 371 } 372 } 373 374 static bool icm_firmware_running(const struct tb_nhi *nhi) 375 { 376 u32 val; 377 378 val = ioread32(nhi->iobase + REG_FW_STS); 379 return !!(val & REG_FW_STS_ICM_EN); 380 } 381 382 static bool icm_fr_is_supported(struct tb *tb) 383 { 384 return !x86_apple_machine; 385 } 386 387 static inline int icm_fr_get_switch_index(u32 port) 388 { 389 int index; 390 391 if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT) 392 return 0; 393 394 index = port >> ICM_PORT_INDEX_SHIFT; 395 return index != 0xff ? index : 0; 396 } 397 398 static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) 399 { 400 struct icm_fr_pkg_get_topology_response *switches, *sw; 401 struct icm_fr_pkg_get_topology request = { 402 .hdr = { .code = ICM_GET_TOPOLOGY }, 403 }; 404 size_t npackets = ICM_GET_TOPOLOGY_PACKETS; 405 int ret, index; 406 u8 i; 407 408 switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL); 409 if (!switches) 410 return -ENOMEM; 411 412 ret = icm_request(tb, &request, sizeof(request), switches, 413 sizeof(*switches), npackets, ICM_TIMEOUT); 414 if (ret) 415 goto err_free; 416 417 sw = &switches[0]; 418 index = icm_fr_get_switch_index(sw->ports[link]); 419 if (!index) { 420 ret = -ENODEV; 421 goto err_free; 422 } 423 424 sw = &switches[index]; 425 for (i = 1; i < depth; i++) { 426 unsigned int j; 427 428 if (!(sw->first_data & ICM_SWITCH_USED)) { 429 ret = -ENODEV; 430 goto err_free; 431 } 432 433 for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { 434 index = icm_fr_get_switch_index(sw->ports[j]); 435 if (index > sw->switch_index) { 436 sw = &switches[index]; 437 break; 438 } 439 } 440 } 441 442 *route = get_route(sw->route_hi, sw->route_lo); 443 444 err_free: 445 kfree(switches); 446 return ret; 447 } 448 449 static void icm_fr_save_devices(struct tb *tb) 450 { 451 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); 452 } 453 454 static int 455 icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, 456 u8 *proto_version, size_t *nboot_acl, bool *rpm) 457 { 458 struct icm_fr_pkg_driver_ready_response reply; 459 struct icm_pkg_driver_ready request = { 460 .hdr.code = ICM_DRIVER_READY, 461 }; 462 int ret; 463 464 memset(&reply, 0, sizeof(reply)); 465 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 466 1, ICM_TIMEOUT); 467 if (ret) 468 return ret; 469 470 if (security_level) 471 *security_level = reply.security_level & ICM_FR_SLEVEL_MASK; 472 473 return 0; 474 } 475 476 static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) 477 { 478 struct icm_fr_pkg_approve_device request; 479 struct icm_fr_pkg_approve_device reply; 480 int ret; 481 482 memset(&request, 0, sizeof(request)); 483 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 484 request.hdr.code = ICM_APPROVE_DEVICE; 485 request.connection_id = sw->connection_id; 486 request.connection_key = sw->connection_key; 487 488 memset(&reply, 0, sizeof(reply)); 489 /* Use larger timeout as establishing tunnels can take some time */ 490 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 491 1, ICM_APPROVE_TIMEOUT); 492 if (ret) 493 return ret; 494 495 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 496 tb_warn(tb, "PCIe tunnel creation failed\n"); 497 return -EIO; 498 } 499 500 return 0; 501 } 502 503 static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) 504 { 505 struct icm_fr_pkg_add_device_key request; 506 struct icm_fr_pkg_add_device_key_response reply; 507 int ret; 508 509 memset(&request, 0, sizeof(request)); 510 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 511 request.hdr.code = ICM_ADD_DEVICE_KEY; 512 request.connection_id = sw->connection_id; 513 request.connection_key = sw->connection_key; 514 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); 515 516 memset(&reply, 0, sizeof(reply)); 517 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 518 1, ICM_TIMEOUT); 519 if (ret) 520 return ret; 521 522 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 523 tb_warn(tb, "Adding key to switch failed\n"); 524 return -EIO; 525 } 526 527 return 0; 528 } 529 530 static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, 531 const u8 *challenge, u8 *response) 532 { 533 struct icm_fr_pkg_challenge_device request; 534 struct icm_fr_pkg_challenge_device_response reply; 535 int ret; 536 537 memset(&request, 0, sizeof(request)); 538 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 539 request.hdr.code = ICM_CHALLENGE_DEVICE; 540 request.connection_id = sw->connection_id; 541 request.connection_key = sw->connection_key; 542 memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); 543 544 memset(&reply, 0, sizeof(reply)); 545 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 546 1, ICM_TIMEOUT); 547 if (ret) 548 return ret; 549 550 if (reply.hdr.flags & ICM_FLAGS_ERROR) 551 return -EKEYREJECTED; 552 if (reply.hdr.flags & ICM_FLAGS_NO_KEY) 553 return -ENOKEY; 554 555 memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); 556 557 return 0; 558 } 559 560 static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 561 int transmit_path, int transmit_ring, 562 int receive_path, int receive_ring) 563 { 564 struct icm_fr_pkg_approve_xdomain_response reply; 565 struct icm_fr_pkg_approve_xdomain request; 566 int ret; 567 568 memset(&request, 0, sizeof(request)); 569 request.hdr.code = ICM_APPROVE_XDOMAIN; 570 request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; 571 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); 572 573 request.transmit_path = transmit_path; 574 request.transmit_ring = transmit_ring; 575 request.receive_path = receive_path; 576 request.receive_ring = receive_ring; 577 578 memset(&reply, 0, sizeof(reply)); 579 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 580 1, ICM_TIMEOUT); 581 if (ret) 582 return ret; 583 584 if (reply.hdr.flags & ICM_FLAGS_ERROR) 585 return -EIO; 586 587 return 0; 588 } 589 590 static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 591 int transmit_path, int transmit_ring, 592 int receive_path, int receive_ring) 593 { 594 u8 phy_port; 595 u8 cmd; 596 597 phy_port = tb_phy_port_from_link(xd->link); 598 if (phy_port == 0) 599 cmd = NHI_MAILBOX_DISCONNECT_PA; 600 else 601 cmd = NHI_MAILBOX_DISCONNECT_PB; 602 603 nhi_mailbox_cmd(tb->nhi, cmd, 1); 604 usleep_range(10, 50); 605 nhi_mailbox_cmd(tb->nhi, cmd, 2); 606 return 0; 607 } 608 609 static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route, 610 const uuid_t *uuid) 611 { 612 struct tb *tb = parent_sw->tb; 613 struct tb_switch *sw; 614 615 sw = tb_switch_alloc(tb, &parent_sw->dev, route); 616 if (IS_ERR(sw)) { 617 tb_warn(tb, "failed to allocate switch at %llx\n", route); 618 return sw; 619 } 620 621 sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); 622 if (!sw->uuid) { 623 tb_switch_put(sw); 624 return ERR_PTR(-ENOMEM); 625 } 626 627 init_completion(&sw->rpm_complete); 628 return sw; 629 } 630 631 static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw) 632 { 633 u64 route = tb_route(sw); 634 int ret; 635 636 /* Link the two switches now */ 637 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 638 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); 639 640 ret = tb_switch_add(sw); 641 if (ret) 642 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 643 644 return ret; 645 } 646 647 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, 648 u64 route, u8 connection_id, u8 connection_key, 649 u8 link, u8 depth, bool boot) 650 { 651 /* Disconnect from parent */ 652 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 653 /* Re-connect via updated port*/ 654 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 655 656 /* Update with the new addressing information */ 657 sw->config.route_hi = upper_32_bits(route); 658 sw->config.route_lo = lower_32_bits(route); 659 sw->connection_id = connection_id; 660 sw->connection_key = connection_key; 661 sw->link = link; 662 sw->depth = depth; 663 sw->boot = boot; 664 665 /* This switch still exists */ 666 sw->is_unplugged = false; 667 668 /* Runtime resume is now complete */ 669 complete(&sw->rpm_complete); 670 } 671 672 static void remove_switch(struct tb_switch *sw) 673 { 674 struct tb_switch *parent_sw; 675 676 parent_sw = tb_to_switch(sw->dev.parent); 677 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 678 tb_switch_remove(sw); 679 } 680 681 static void add_xdomain(struct tb_switch *sw, u64 route, 682 const uuid_t *local_uuid, const uuid_t *remote_uuid, 683 u8 link, u8 depth) 684 { 685 struct tb_xdomain *xd; 686 687 pm_runtime_get_sync(&sw->dev); 688 689 xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); 690 if (!xd) 691 goto out; 692 693 xd->link = link; 694 xd->depth = depth; 695 696 tb_port_at(route, sw)->xdomain = xd; 697 698 tb_xdomain_add(xd); 699 700 out: 701 pm_runtime_mark_last_busy(&sw->dev); 702 pm_runtime_put_autosuspend(&sw->dev); 703 } 704 705 static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) 706 { 707 xd->link = link; 708 xd->route = route; 709 xd->is_unplugged = false; 710 } 711 712 static void remove_xdomain(struct tb_xdomain *xd) 713 { 714 struct tb_switch *sw; 715 716 sw = tb_to_switch(xd->dev.parent); 717 tb_port_at(xd->route, sw)->xdomain = NULL; 718 tb_xdomain_remove(xd); 719 } 720 721 static void 722 icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 723 { 724 const struct icm_fr_event_device_connected *pkg = 725 (const struct icm_fr_event_device_connected *)hdr; 726 enum tb_security_level security_level; 727 struct tb_switch *sw, *parent_sw; 728 bool boot, dual_lane, speed_gen3; 729 struct icm *icm = tb_priv(tb); 730 bool authorized = false; 731 struct tb_xdomain *xd; 732 u8 link, depth; 733 u64 route; 734 int ret; 735 736 icm_postpone_rescan(tb); 737 738 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 739 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 740 ICM_LINK_INFO_DEPTH_SHIFT; 741 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; 742 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> 743 ICM_FLAGS_SLEVEL_SHIFT; 744 boot = pkg->link_info & ICM_LINK_INFO_BOOT; 745 dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; 746 speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; 747 748 if (pkg->link_info & ICM_LINK_INFO_REJECTED) { 749 tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", 750 link, depth); 751 return; 752 } 753 754 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); 755 if (sw) { 756 u8 phy_port, sw_phy_port; 757 758 parent_sw = tb_to_switch(sw->dev.parent); 759 sw_phy_port = tb_phy_port_from_link(sw->link); 760 phy_port = tb_phy_port_from_link(link); 761 762 /* 763 * On resume ICM will send us connected events for the 764 * devices that still are present. However, that 765 * information might have changed for example by the 766 * fact that a switch on a dual-link connection might 767 * have been enumerated using the other link now. Make 768 * sure our book keeping matches that. 769 */ 770 if (sw->depth == depth && sw_phy_port == phy_port && 771 !!sw->authorized == authorized) { 772 /* 773 * It was enumerated through another link so update 774 * route string accordingly. 775 */ 776 if (sw->link != link) { 777 ret = icm->get_route(tb, link, depth, &route); 778 if (ret) { 779 tb_err(tb, "failed to update route string for switch at %u.%u\n", 780 link, depth); 781 tb_switch_put(sw); 782 return; 783 } 784 } else { 785 route = tb_route(sw); 786 } 787 788 update_switch(parent_sw, sw, route, pkg->connection_id, 789 pkg->connection_key, link, depth, boot); 790 tb_switch_put(sw); 791 return; 792 } 793 794 /* 795 * User connected the same switch to another physical 796 * port or to another part of the topology. Remove the 797 * existing switch now before adding the new one. 798 */ 799 remove_switch(sw); 800 tb_switch_put(sw); 801 } 802 803 /* 804 * If the switch was not found by UUID, look for a switch on 805 * same physical port (taking possible link aggregation into 806 * account) and depth. If we found one it is definitely a stale 807 * one so remove it first. 808 */ 809 sw = tb_switch_find_by_link_depth(tb, link, depth); 810 if (!sw) { 811 u8 dual_link; 812 813 dual_link = dual_link_from_link(link); 814 if (dual_link) 815 sw = tb_switch_find_by_link_depth(tb, dual_link, depth); 816 } 817 if (sw) { 818 remove_switch(sw); 819 tb_switch_put(sw); 820 } 821 822 /* Remove existing XDomain connection if found */ 823 xd = tb_xdomain_find_by_link_depth(tb, link, depth); 824 if (xd) { 825 remove_xdomain(xd); 826 tb_xdomain_put(xd); 827 } 828 829 parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1); 830 if (!parent_sw) { 831 tb_err(tb, "failed to find parent switch for %u.%u\n", 832 link, depth); 833 return; 834 } 835 836 ret = icm->get_route(tb, link, depth, &route); 837 if (ret) { 838 tb_err(tb, "failed to find route string for switch at %u.%u\n", 839 link, depth); 840 tb_switch_put(parent_sw); 841 return; 842 } 843 844 pm_runtime_get_sync(&parent_sw->dev); 845 846 sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); 847 if (!IS_ERR(sw)) { 848 sw->connection_id = pkg->connection_id; 849 sw->connection_key = pkg->connection_key; 850 sw->link = link; 851 sw->depth = depth; 852 sw->authorized = authorized; 853 sw->security_level = security_level; 854 sw->boot = boot; 855 sw->link_speed = speed_gen3 ? 20 : 10; 856 sw->link_width = dual_lane ? 2 : 1; 857 sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name)); 858 859 if (add_switch(parent_sw, sw)) 860 tb_switch_put(sw); 861 } 862 863 pm_runtime_mark_last_busy(&parent_sw->dev); 864 pm_runtime_put_autosuspend(&parent_sw->dev); 865 866 tb_switch_put(parent_sw); 867 } 868 869 static void 870 icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 871 { 872 const struct icm_fr_event_device_disconnected *pkg = 873 (const struct icm_fr_event_device_disconnected *)hdr; 874 struct tb_switch *sw; 875 u8 link, depth; 876 877 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 878 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 879 ICM_LINK_INFO_DEPTH_SHIFT; 880 881 if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { 882 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); 883 return; 884 } 885 886 sw = tb_switch_find_by_link_depth(tb, link, depth); 887 if (!sw) { 888 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, 889 depth); 890 return; 891 } 892 893 pm_runtime_get_sync(sw->dev.parent); 894 895 remove_switch(sw); 896 897 pm_runtime_mark_last_busy(sw->dev.parent); 898 pm_runtime_put_autosuspend(sw->dev.parent); 899 900 tb_switch_put(sw); 901 } 902 903 static void 904 icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) 905 { 906 const struct icm_fr_event_xdomain_connected *pkg = 907 (const struct icm_fr_event_xdomain_connected *)hdr; 908 struct tb_xdomain *xd; 909 struct tb_switch *sw; 910 u8 link, depth; 911 u64 route; 912 913 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 914 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 915 ICM_LINK_INFO_DEPTH_SHIFT; 916 917 if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { 918 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); 919 return; 920 } 921 922 route = get_route(pkg->local_route_hi, pkg->local_route_lo); 923 924 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); 925 if (xd) { 926 u8 xd_phy_port, phy_port; 927 928 xd_phy_port = phy_port_from_route(xd->route, xd->depth); 929 phy_port = phy_port_from_route(route, depth); 930 931 if (xd->depth == depth && xd_phy_port == phy_port) { 932 update_xdomain(xd, route, link); 933 tb_xdomain_put(xd); 934 return; 935 } 936 937 /* 938 * If we find an existing XDomain connection remove it 939 * now. We need to go through login handshake and 940 * everything anyway to be able to re-establish the 941 * connection. 942 */ 943 remove_xdomain(xd); 944 tb_xdomain_put(xd); 945 } 946 947 /* 948 * Look if there already exists an XDomain in the same place 949 * than the new one and in that case remove it because it is 950 * most likely another host that got disconnected. 951 */ 952 xd = tb_xdomain_find_by_link_depth(tb, link, depth); 953 if (!xd) { 954 u8 dual_link; 955 956 dual_link = dual_link_from_link(link); 957 if (dual_link) 958 xd = tb_xdomain_find_by_link_depth(tb, dual_link, 959 depth); 960 } 961 if (xd) { 962 remove_xdomain(xd); 963 tb_xdomain_put(xd); 964 } 965 966 /* 967 * If the user disconnected a switch during suspend and 968 * connected another host to the same port, remove the switch 969 * first. 970 */ 971 sw = tb_switch_find_by_route(tb, route); 972 if (sw) { 973 remove_switch(sw); 974 tb_switch_put(sw); 975 } 976 977 sw = tb_switch_find_by_link_depth(tb, link, depth); 978 if (!sw) { 979 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, 980 depth); 981 return; 982 } 983 984 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, 985 depth); 986 tb_switch_put(sw); 987 } 988 989 static void 990 icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 991 { 992 const struct icm_fr_event_xdomain_disconnected *pkg = 993 (const struct icm_fr_event_xdomain_disconnected *)hdr; 994 struct tb_xdomain *xd; 995 996 /* 997 * If the connection is through one or multiple devices, the 998 * XDomain device is removed along with them so it is fine if we 999 * cannot find it here. 1000 */ 1001 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); 1002 if (xd) { 1003 remove_xdomain(xd); 1004 tb_xdomain_put(xd); 1005 } 1006 } 1007 1008 static int icm_tr_cio_reset(struct tb *tb) 1009 { 1010 return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1)); 1011 } 1012 1013 static int 1014 icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, 1015 u8 *proto_version, size_t *nboot_acl, bool *rpm) 1016 { 1017 struct icm_tr_pkg_driver_ready_response reply; 1018 struct icm_pkg_driver_ready request = { 1019 .hdr.code = ICM_DRIVER_READY, 1020 }; 1021 int ret; 1022 1023 memset(&reply, 0, sizeof(reply)); 1024 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1025 1, 20000); 1026 if (ret) 1027 return ret; 1028 1029 if (security_level) 1030 *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; 1031 if (proto_version) 1032 *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> 1033 ICM_TR_INFO_PROTO_VERSION_SHIFT; 1034 if (nboot_acl) 1035 *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> 1036 ICM_TR_INFO_BOOT_ACL_SHIFT; 1037 if (rpm) 1038 *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3); 1039 1040 return 0; 1041 } 1042 1043 static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) 1044 { 1045 struct icm_tr_pkg_approve_device request; 1046 struct icm_tr_pkg_approve_device reply; 1047 int ret; 1048 1049 memset(&request, 0, sizeof(request)); 1050 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 1051 request.hdr.code = ICM_APPROVE_DEVICE; 1052 request.route_lo = sw->config.route_lo; 1053 request.route_hi = sw->config.route_hi; 1054 request.connection_id = sw->connection_id; 1055 1056 memset(&reply, 0, sizeof(reply)); 1057 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1058 1, ICM_APPROVE_TIMEOUT); 1059 if (ret) 1060 return ret; 1061 1062 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 1063 tb_warn(tb, "PCIe tunnel creation failed\n"); 1064 return -EIO; 1065 } 1066 1067 return 0; 1068 } 1069 1070 static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) 1071 { 1072 struct icm_tr_pkg_add_device_key_response reply; 1073 struct icm_tr_pkg_add_device_key request; 1074 int ret; 1075 1076 memset(&request, 0, sizeof(request)); 1077 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 1078 request.hdr.code = ICM_ADD_DEVICE_KEY; 1079 request.route_lo = sw->config.route_lo; 1080 request.route_hi = sw->config.route_hi; 1081 request.connection_id = sw->connection_id; 1082 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); 1083 1084 memset(&reply, 0, sizeof(reply)); 1085 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1086 1, ICM_TIMEOUT); 1087 if (ret) 1088 return ret; 1089 1090 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 1091 tb_warn(tb, "Adding key to switch failed\n"); 1092 return -EIO; 1093 } 1094 1095 return 0; 1096 } 1097 1098 static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, 1099 const u8 *challenge, u8 *response) 1100 { 1101 struct icm_tr_pkg_challenge_device_response reply; 1102 struct icm_tr_pkg_challenge_device request; 1103 int ret; 1104 1105 memset(&request, 0, sizeof(request)); 1106 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 1107 request.hdr.code = ICM_CHALLENGE_DEVICE; 1108 request.route_lo = sw->config.route_lo; 1109 request.route_hi = sw->config.route_hi; 1110 request.connection_id = sw->connection_id; 1111 memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); 1112 1113 memset(&reply, 0, sizeof(reply)); 1114 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1115 1, ICM_TIMEOUT); 1116 if (ret) 1117 return ret; 1118 1119 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1120 return -EKEYREJECTED; 1121 if (reply.hdr.flags & ICM_FLAGS_NO_KEY) 1122 return -ENOKEY; 1123 1124 memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); 1125 1126 return 0; 1127 } 1128 1129 static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1130 int transmit_path, int transmit_ring, 1131 int receive_path, int receive_ring) 1132 { 1133 struct icm_tr_pkg_approve_xdomain_response reply; 1134 struct icm_tr_pkg_approve_xdomain request; 1135 int ret; 1136 1137 memset(&request, 0, sizeof(request)); 1138 request.hdr.code = ICM_APPROVE_XDOMAIN; 1139 request.route_hi = upper_32_bits(xd->route); 1140 request.route_lo = lower_32_bits(xd->route); 1141 request.transmit_path = transmit_path; 1142 request.transmit_ring = transmit_ring; 1143 request.receive_path = receive_path; 1144 request.receive_ring = receive_ring; 1145 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); 1146 1147 memset(&reply, 0, sizeof(reply)); 1148 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1149 1, ICM_TIMEOUT); 1150 if (ret) 1151 return ret; 1152 1153 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1154 return -EIO; 1155 1156 return 0; 1157 } 1158 1159 static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, 1160 int stage) 1161 { 1162 struct icm_tr_pkg_disconnect_xdomain_response reply; 1163 struct icm_tr_pkg_disconnect_xdomain request; 1164 int ret; 1165 1166 memset(&request, 0, sizeof(request)); 1167 request.hdr.code = ICM_DISCONNECT_XDOMAIN; 1168 request.stage = stage; 1169 request.route_hi = upper_32_bits(xd->route); 1170 request.route_lo = lower_32_bits(xd->route); 1171 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); 1172 1173 memset(&reply, 0, sizeof(reply)); 1174 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1175 1, ICM_TIMEOUT); 1176 if (ret) 1177 return ret; 1178 1179 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1180 return -EIO; 1181 1182 return 0; 1183 } 1184 1185 static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1186 int transmit_path, int transmit_ring, 1187 int receive_path, int receive_ring) 1188 { 1189 int ret; 1190 1191 ret = icm_tr_xdomain_tear_down(tb, xd, 1); 1192 if (ret) 1193 return ret; 1194 1195 usleep_range(10, 50); 1196 return icm_tr_xdomain_tear_down(tb, xd, 2); 1197 } 1198 1199 static void 1200 __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, 1201 bool force_rtd3) 1202 { 1203 const struct icm_tr_event_device_connected *pkg = 1204 (const struct icm_tr_event_device_connected *)hdr; 1205 bool authorized, boot, dual_lane, speed_gen3; 1206 enum tb_security_level security_level; 1207 struct tb_switch *sw, *parent_sw; 1208 struct tb_xdomain *xd; 1209 u64 route; 1210 1211 icm_postpone_rescan(tb); 1212 1213 /* 1214 * Currently we don't use the QoS information coming with the 1215 * device connected message so simply just ignore that extra 1216 * packet for now. 1217 */ 1218 if (pkg->hdr.packet_id) 1219 return; 1220 1221 route = get_route(pkg->route_hi, pkg->route_lo); 1222 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; 1223 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> 1224 ICM_FLAGS_SLEVEL_SHIFT; 1225 boot = pkg->link_info & ICM_LINK_INFO_BOOT; 1226 dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; 1227 speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; 1228 1229 if (pkg->link_info & ICM_LINK_INFO_REJECTED) { 1230 tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", 1231 route); 1232 return; 1233 } 1234 1235 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); 1236 if (sw) { 1237 /* Update the switch if it is still in the same place */ 1238 if (tb_route(sw) == route && !!sw->authorized == authorized) { 1239 parent_sw = tb_to_switch(sw->dev.parent); 1240 update_switch(parent_sw, sw, route, pkg->connection_id, 1241 0, 0, 0, boot); 1242 tb_switch_put(sw); 1243 return; 1244 } 1245 1246 remove_switch(sw); 1247 tb_switch_put(sw); 1248 } 1249 1250 /* Another switch with the same address */ 1251 sw = tb_switch_find_by_route(tb, route); 1252 if (sw) { 1253 remove_switch(sw); 1254 tb_switch_put(sw); 1255 } 1256 1257 /* XDomain connection with the same address */ 1258 xd = tb_xdomain_find_by_route(tb, route); 1259 if (xd) { 1260 remove_xdomain(xd); 1261 tb_xdomain_put(xd); 1262 } 1263 1264 parent_sw = tb_switch_find_by_route(tb, get_parent_route(route)); 1265 if (!parent_sw) { 1266 tb_err(tb, "failed to find parent switch for %llx\n", route); 1267 return; 1268 } 1269 1270 pm_runtime_get_sync(&parent_sw->dev); 1271 1272 sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); 1273 if (!IS_ERR(sw)) { 1274 sw->connection_id = pkg->connection_id; 1275 sw->authorized = authorized; 1276 sw->security_level = security_level; 1277 sw->boot = boot; 1278 sw->link_speed = speed_gen3 ? 20 : 10; 1279 sw->link_width = dual_lane ? 2 : 1; 1280 sw->rpm = force_rtd3; 1281 if (!sw->rpm) 1282 sw->rpm = intel_vss_is_rtd3(pkg->ep_name, 1283 sizeof(pkg->ep_name)); 1284 1285 if (add_switch(parent_sw, sw)) 1286 tb_switch_put(sw); 1287 } 1288 1289 pm_runtime_mark_last_busy(&parent_sw->dev); 1290 pm_runtime_put_autosuspend(&parent_sw->dev); 1291 1292 tb_switch_put(parent_sw); 1293 } 1294 1295 static void 1296 icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1297 { 1298 __icm_tr_device_connected(tb, hdr, false); 1299 } 1300 1301 static void 1302 icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 1303 { 1304 const struct icm_tr_event_device_disconnected *pkg = 1305 (const struct icm_tr_event_device_disconnected *)hdr; 1306 struct tb_switch *sw; 1307 u64 route; 1308 1309 route = get_route(pkg->route_hi, pkg->route_lo); 1310 1311 sw = tb_switch_find_by_route(tb, route); 1312 if (!sw) { 1313 tb_warn(tb, "no switch exists at %llx, ignoring\n", route); 1314 return; 1315 } 1316 pm_runtime_get_sync(sw->dev.parent); 1317 1318 remove_switch(sw); 1319 1320 pm_runtime_mark_last_busy(sw->dev.parent); 1321 pm_runtime_put_autosuspend(sw->dev.parent); 1322 1323 tb_switch_put(sw); 1324 } 1325 1326 static void 1327 icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1328 { 1329 const struct icm_tr_event_xdomain_connected *pkg = 1330 (const struct icm_tr_event_xdomain_connected *)hdr; 1331 struct tb_xdomain *xd; 1332 struct tb_switch *sw; 1333 u64 route; 1334 1335 if (!tb->root_switch) 1336 return; 1337 1338 route = get_route(pkg->local_route_hi, pkg->local_route_lo); 1339 1340 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); 1341 if (xd) { 1342 if (xd->route == route) { 1343 update_xdomain(xd, route, 0); 1344 tb_xdomain_put(xd); 1345 return; 1346 } 1347 1348 remove_xdomain(xd); 1349 tb_xdomain_put(xd); 1350 } 1351 1352 /* An existing xdomain with the same address */ 1353 xd = tb_xdomain_find_by_route(tb, route); 1354 if (xd) { 1355 remove_xdomain(xd); 1356 tb_xdomain_put(xd); 1357 } 1358 1359 /* 1360 * If the user disconnected a switch during suspend and 1361 * connected another host to the same port, remove the switch 1362 * first. 1363 */ 1364 sw = tb_switch_find_by_route(tb, route); 1365 if (sw) { 1366 remove_switch(sw); 1367 tb_switch_put(sw); 1368 } 1369 1370 sw = tb_switch_find_by_route(tb, get_parent_route(route)); 1371 if (!sw) { 1372 tb_warn(tb, "no switch exists at %llx, ignoring\n", route); 1373 return; 1374 } 1375 1376 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); 1377 tb_switch_put(sw); 1378 } 1379 1380 static void 1381 icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 1382 { 1383 const struct icm_tr_event_xdomain_disconnected *pkg = 1384 (const struct icm_tr_event_xdomain_disconnected *)hdr; 1385 struct tb_xdomain *xd; 1386 u64 route; 1387 1388 route = get_route(pkg->route_hi, pkg->route_lo); 1389 1390 xd = tb_xdomain_find_by_route(tb, route); 1391 if (xd) { 1392 remove_xdomain(xd); 1393 tb_xdomain_put(xd); 1394 } 1395 } 1396 1397 static struct pci_dev *get_upstream_port(struct pci_dev *pdev) 1398 { 1399 struct pci_dev *parent; 1400 1401 parent = pci_upstream_bridge(pdev); 1402 while (parent) { 1403 if (!pci_is_pcie(parent)) 1404 return NULL; 1405 if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) 1406 break; 1407 parent = pci_upstream_bridge(parent); 1408 } 1409 1410 if (!parent) 1411 return NULL; 1412 1413 switch (parent->device) { 1414 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 1415 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 1416 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 1417 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 1418 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 1419 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 1420 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 1421 return parent; 1422 } 1423 1424 return NULL; 1425 } 1426 1427 static bool icm_ar_is_supported(struct tb *tb) 1428 { 1429 struct pci_dev *upstream_port; 1430 struct icm *icm = tb_priv(tb); 1431 1432 /* 1433 * Starting from Alpine Ridge we can use ICM on Apple machines 1434 * as well. We just need to reset and re-enable it first. 1435 * However, only start it if explicitly asked by the user. 1436 */ 1437 if (icm_firmware_running(tb->nhi)) 1438 return true; 1439 if (!start_icm) 1440 return false; 1441 1442 /* 1443 * Find the upstream PCIe port in case we need to do reset 1444 * through its vendor specific registers. 1445 */ 1446 upstream_port = get_upstream_port(tb->nhi->pdev); 1447 if (upstream_port) { 1448 int cap; 1449 1450 cap = pci_find_ext_capability(upstream_port, 1451 PCI_EXT_CAP_ID_VNDR); 1452 if (cap > 0) { 1453 icm->upstream_port = upstream_port; 1454 icm->vnd_cap = cap; 1455 1456 return true; 1457 } 1458 } 1459 1460 return false; 1461 } 1462 1463 static int icm_ar_cio_reset(struct tb *tb) 1464 { 1465 return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9)); 1466 } 1467 1468 static int icm_ar_get_mode(struct tb *tb) 1469 { 1470 struct tb_nhi *nhi = tb->nhi; 1471 int retries = 60; 1472 u32 val; 1473 1474 do { 1475 val = ioread32(nhi->iobase + REG_FW_STS); 1476 if (val & REG_FW_STS_NVM_AUTH_DONE) 1477 break; 1478 msleep(50); 1479 } while (--retries); 1480 1481 if (!retries) { 1482 dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n"); 1483 return -ENODEV; 1484 } 1485 1486 return nhi_mailbox_mode(nhi); 1487 } 1488 1489 static int 1490 icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, 1491 u8 *proto_version, size_t *nboot_acl, bool *rpm) 1492 { 1493 struct icm_ar_pkg_driver_ready_response reply; 1494 struct icm_pkg_driver_ready request = { 1495 .hdr.code = ICM_DRIVER_READY, 1496 }; 1497 int ret; 1498 1499 memset(&reply, 0, sizeof(reply)); 1500 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1501 1, ICM_TIMEOUT); 1502 if (ret) 1503 return ret; 1504 1505 if (security_level) 1506 *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK; 1507 if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED)) 1508 *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >> 1509 ICM_AR_INFO_BOOT_ACL_SHIFT; 1510 if (rpm) 1511 *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3); 1512 1513 return 0; 1514 } 1515 1516 static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) 1517 { 1518 struct icm_ar_pkg_get_route_response reply; 1519 struct icm_ar_pkg_get_route request = { 1520 .hdr = { .code = ICM_GET_ROUTE }, 1521 .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link, 1522 }; 1523 int ret; 1524 1525 memset(&reply, 0, sizeof(reply)); 1526 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1527 1, ICM_TIMEOUT); 1528 if (ret) 1529 return ret; 1530 1531 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1532 return -EIO; 1533 1534 *route = get_route(reply.route_hi, reply.route_lo); 1535 return 0; 1536 } 1537 1538 static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) 1539 { 1540 struct icm_ar_pkg_preboot_acl_response reply; 1541 struct icm_ar_pkg_preboot_acl request = { 1542 .hdr = { .code = ICM_PREBOOT_ACL }, 1543 }; 1544 int ret, i; 1545 1546 memset(&reply, 0, sizeof(reply)); 1547 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1548 1, ICM_TIMEOUT); 1549 if (ret) 1550 return ret; 1551 1552 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1553 return -EIO; 1554 1555 for (i = 0; i < nuuids; i++) { 1556 u32 *uuid = (u32 *)&uuids[i]; 1557 1558 uuid[0] = reply.acl[i].uuid_lo; 1559 uuid[1] = reply.acl[i].uuid_hi; 1560 1561 if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) { 1562 /* Map empty entries to null UUID */ 1563 uuid[0] = 0; 1564 uuid[1] = 0; 1565 } else if (uuid[0] != 0 || uuid[1] != 0) { 1566 /* Upper two DWs are always one's */ 1567 uuid[2] = 0xffffffff; 1568 uuid[3] = 0xffffffff; 1569 } 1570 } 1571 1572 return ret; 1573 } 1574 1575 static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, 1576 size_t nuuids) 1577 { 1578 struct icm_ar_pkg_preboot_acl_response reply; 1579 struct icm_ar_pkg_preboot_acl request = { 1580 .hdr = { 1581 .code = ICM_PREBOOT_ACL, 1582 .flags = ICM_FLAGS_WRITE, 1583 }, 1584 }; 1585 int ret, i; 1586 1587 for (i = 0; i < nuuids; i++) { 1588 const u32 *uuid = (const u32 *)&uuids[i]; 1589 1590 if (uuid_is_null(&uuids[i])) { 1591 /* 1592 * Map null UUID to the empty (all one) entries 1593 * for ICM. 1594 */ 1595 request.acl[i].uuid_lo = 0xffffffff; 1596 request.acl[i].uuid_hi = 0xffffffff; 1597 } else { 1598 /* Two high DWs need to be set to all one */ 1599 if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff) 1600 return -EINVAL; 1601 1602 request.acl[i].uuid_lo = uuid[0]; 1603 request.acl[i].uuid_hi = uuid[1]; 1604 } 1605 } 1606 1607 memset(&reply, 0, sizeof(reply)); 1608 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1609 1, ICM_TIMEOUT); 1610 if (ret) 1611 return ret; 1612 1613 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1614 return -EIO; 1615 1616 return 0; 1617 } 1618 1619 static int 1620 icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, 1621 u8 *proto_version, size_t *nboot_acl, bool *rpm) 1622 { 1623 struct icm_tr_pkg_driver_ready_response reply; 1624 struct icm_pkg_driver_ready request = { 1625 .hdr.code = ICM_DRIVER_READY, 1626 }; 1627 int ret; 1628 1629 memset(&reply, 0, sizeof(reply)); 1630 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1631 1, 20000); 1632 if (ret) 1633 return ret; 1634 1635 if (proto_version) 1636 *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> 1637 ICM_TR_INFO_PROTO_VERSION_SHIFT; 1638 1639 /* Ice Lake always supports RTD3 */ 1640 if (rpm) 1641 *rpm = true; 1642 1643 return 0; 1644 } 1645 1646 static void icm_icl_set_uuid(struct tb *tb) 1647 { 1648 struct tb_nhi *nhi = tb->nhi; 1649 u32 uuid[4]; 1650 1651 pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); 1652 pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]); 1653 uuid[2] = 0xffffffff; 1654 uuid[3] = 0xffffffff; 1655 1656 tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 1657 } 1658 1659 static void 1660 icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1661 { 1662 __icm_tr_device_connected(tb, hdr, true); 1663 } 1664 1665 static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) 1666 { 1667 const struct icm_icl_event_rtd3_veto *pkg = 1668 (const struct icm_icl_event_rtd3_veto *)hdr; 1669 1670 tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); 1671 1672 if (pkg->veto_reason) 1673 icm_veto_begin(tb); 1674 else 1675 icm_veto_end(tb); 1676 } 1677 1678 static bool icm_tgl_is_supported(struct tb *tb) 1679 { 1680 u32 val; 1681 1682 /* 1683 * If the firmware is not running use software CM. This platform 1684 * should fully support both. 1685 */ 1686 val = ioread32(tb->nhi->iobase + REG_FW_STS); 1687 return !!(val & REG_FW_STS_NVM_AUTH_DONE); 1688 } 1689 1690 static void icm_handle_notification(struct work_struct *work) 1691 { 1692 struct icm_notification *n = container_of(work, typeof(*n), work); 1693 struct tb *tb = n->tb; 1694 struct icm *icm = tb_priv(tb); 1695 1696 mutex_lock(&tb->lock); 1697 1698 /* 1699 * When the domain is stopped we flush its workqueue but before 1700 * that the root switch is removed. In that case we should treat 1701 * the queued events as being canceled. 1702 */ 1703 if (tb->root_switch) { 1704 switch (n->pkg->code) { 1705 case ICM_EVENT_DEVICE_CONNECTED: 1706 icm->device_connected(tb, n->pkg); 1707 break; 1708 case ICM_EVENT_DEVICE_DISCONNECTED: 1709 icm->device_disconnected(tb, n->pkg); 1710 break; 1711 case ICM_EVENT_XDOMAIN_CONNECTED: 1712 if (tb_is_xdomain_enabled()) 1713 icm->xdomain_connected(tb, n->pkg); 1714 break; 1715 case ICM_EVENT_XDOMAIN_DISCONNECTED: 1716 if (tb_is_xdomain_enabled()) 1717 icm->xdomain_disconnected(tb, n->pkg); 1718 break; 1719 case ICM_EVENT_RTD3_VETO: 1720 icm->rtd3_veto(tb, n->pkg); 1721 break; 1722 } 1723 } 1724 1725 mutex_unlock(&tb->lock); 1726 1727 kfree(n->pkg); 1728 kfree(n); 1729 } 1730 1731 static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, 1732 const void *buf, size_t size) 1733 { 1734 struct icm_notification *n; 1735 1736 n = kmalloc(sizeof(*n), GFP_KERNEL); 1737 if (!n) 1738 return; 1739 1740 INIT_WORK(&n->work, icm_handle_notification); 1741 n->pkg = kmemdup(buf, size, GFP_KERNEL); 1742 n->tb = tb; 1743 1744 queue_work(tb->wq, &n->work); 1745 } 1746 1747 static int 1748 __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, 1749 u8 *proto_version, size_t *nboot_acl, bool *rpm) 1750 { 1751 struct icm *icm = tb_priv(tb); 1752 unsigned int retries = 50; 1753 int ret; 1754 1755 ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl, 1756 rpm); 1757 if (ret) { 1758 tb_err(tb, "failed to send driver ready to ICM\n"); 1759 return ret; 1760 } 1761 1762 /* 1763 * Hold on here until the switch config space is accessible so 1764 * that we can read root switch config successfully. 1765 */ 1766 do { 1767 struct tb_cfg_result res; 1768 u32 tmp; 1769 1770 res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, 1771 0, 1, 100); 1772 if (!res.err) 1773 return 0; 1774 1775 msleep(50); 1776 } while (--retries); 1777 1778 tb_err(tb, "failed to read root switch config space, giving up\n"); 1779 return -ETIMEDOUT; 1780 } 1781 1782 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) 1783 { 1784 struct icm *icm = tb_priv(tb); 1785 u32 val; 1786 1787 if (!icm->upstream_port) 1788 return -ENODEV; 1789 1790 /* Put ARC to wait for CIO reset event to happen */ 1791 val = ioread32(nhi->iobase + REG_FW_STS); 1792 val |= REG_FW_STS_CIO_RESET_REQ; 1793 iowrite32(val, nhi->iobase + REG_FW_STS); 1794 1795 /* Re-start ARC */ 1796 val = ioread32(nhi->iobase + REG_FW_STS); 1797 val |= REG_FW_STS_ICM_EN_INVERT; 1798 val |= REG_FW_STS_ICM_EN_CPU; 1799 iowrite32(val, nhi->iobase + REG_FW_STS); 1800 1801 /* Trigger CIO reset now */ 1802 return icm->cio_reset(tb); 1803 } 1804 1805 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) 1806 { 1807 unsigned int retries = 10; 1808 int ret; 1809 u32 val; 1810 1811 /* Check if the ICM firmware is already running */ 1812 if (icm_firmware_running(nhi)) 1813 return 0; 1814 1815 dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n"); 1816 1817 ret = icm_firmware_reset(tb, nhi); 1818 if (ret) 1819 return ret; 1820 1821 /* Wait until the ICM firmware tells us it is up and running */ 1822 do { 1823 /* Check that the ICM firmware is running */ 1824 val = ioread32(nhi->iobase + REG_FW_STS); 1825 if (val & REG_FW_STS_NVM_AUTH_DONE) 1826 return 0; 1827 1828 msleep(300); 1829 } while (--retries); 1830 1831 return -ETIMEDOUT; 1832 } 1833 1834 static int icm_reset_phy_port(struct tb *tb, int phy_port) 1835 { 1836 struct icm *icm = tb_priv(tb); 1837 u32 state0, state1; 1838 int port0, port1; 1839 u32 val0, val1; 1840 int ret; 1841 1842 if (!icm->upstream_port) 1843 return 0; 1844 1845 if (phy_port) { 1846 port0 = 3; 1847 port1 = 4; 1848 } else { 1849 port0 = 1; 1850 port1 = 2; 1851 } 1852 1853 /* 1854 * Read link status of both null ports belonging to a single 1855 * physical port. 1856 */ 1857 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); 1858 if (ret) 1859 return ret; 1860 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); 1861 if (ret) 1862 return ret; 1863 1864 state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK; 1865 state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; 1866 state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK; 1867 state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; 1868 1869 /* If they are both up we need to reset them now */ 1870 if (state0 != TB_PORT_UP || state1 != TB_PORT_UP) 1871 return 0; 1872 1873 val0 |= PHY_PORT_CS1_LINK_DISABLE; 1874 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); 1875 if (ret) 1876 return ret; 1877 1878 val1 |= PHY_PORT_CS1_LINK_DISABLE; 1879 ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); 1880 if (ret) 1881 return ret; 1882 1883 /* Wait a bit and then re-enable both ports */ 1884 usleep_range(10, 100); 1885 1886 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); 1887 if (ret) 1888 return ret; 1889 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); 1890 if (ret) 1891 return ret; 1892 1893 val0 &= ~PHY_PORT_CS1_LINK_DISABLE; 1894 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); 1895 if (ret) 1896 return ret; 1897 1898 val1 &= ~PHY_PORT_CS1_LINK_DISABLE; 1899 return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); 1900 } 1901 1902 static int icm_firmware_init(struct tb *tb) 1903 { 1904 struct icm *icm = tb_priv(tb); 1905 struct tb_nhi *nhi = tb->nhi; 1906 int ret; 1907 1908 ret = icm_firmware_start(tb, nhi); 1909 if (ret) { 1910 dev_err(&nhi->pdev->dev, "could not start ICM firmware\n"); 1911 return ret; 1912 } 1913 1914 if (icm->get_mode) { 1915 ret = icm->get_mode(tb); 1916 1917 switch (ret) { 1918 case NHI_FW_SAFE_MODE: 1919 icm->safe_mode = true; 1920 break; 1921 1922 case NHI_FW_CM_MODE: 1923 /* Ask ICM to accept all Thunderbolt devices */ 1924 nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0); 1925 break; 1926 1927 default: 1928 if (ret < 0) 1929 return ret; 1930 1931 tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret); 1932 return -ENODEV; 1933 } 1934 } 1935 1936 /* 1937 * Reset both physical ports if there is anything connected to 1938 * them already. 1939 */ 1940 ret = icm_reset_phy_port(tb, 0); 1941 if (ret) 1942 dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n"); 1943 ret = icm_reset_phy_port(tb, 1); 1944 if (ret) 1945 dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n"); 1946 1947 return 0; 1948 } 1949 1950 static int icm_driver_ready(struct tb *tb) 1951 { 1952 struct icm *icm = tb_priv(tb); 1953 int ret; 1954 1955 ret = icm_firmware_init(tb); 1956 if (ret) 1957 return ret; 1958 1959 if (icm->safe_mode) { 1960 tb_info(tb, "Thunderbolt host controller is in safe mode.\n"); 1961 tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n"); 1962 tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n"); 1963 return 0; 1964 } 1965 1966 ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version, 1967 &tb->nboot_acl, &icm->rpm); 1968 if (ret) 1969 return ret; 1970 1971 /* 1972 * Make sure the number of supported preboot ACL matches what we 1973 * expect or disable the whole feature. 1974 */ 1975 if (tb->nboot_acl > icm->max_boot_acl) 1976 tb->nboot_acl = 0; 1977 1978 if (icm->proto_version >= 3) 1979 tb_dbg(tb, "USB4 proxy operations supported\n"); 1980 1981 return 0; 1982 } 1983 1984 static int icm_suspend(struct tb *tb) 1985 { 1986 struct icm *icm = tb_priv(tb); 1987 1988 if (icm->save_devices) 1989 icm->save_devices(tb); 1990 1991 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); 1992 return 0; 1993 } 1994 1995 /* 1996 * Mark all switches (except root switch) below this one unplugged. ICM 1997 * firmware will send us an updated list of switches after we have send 1998 * it driver ready command. If a switch is not in that list it will be 1999 * removed when we perform rescan. 2000 */ 2001 static void icm_unplug_children(struct tb_switch *sw) 2002 { 2003 struct tb_port *port; 2004 2005 if (tb_route(sw)) 2006 sw->is_unplugged = true; 2007 2008 tb_switch_for_each_port(sw, port) { 2009 if (port->xdomain) 2010 port->xdomain->is_unplugged = true; 2011 else if (tb_port_has_remote(port)) 2012 icm_unplug_children(port->remote->sw); 2013 } 2014 } 2015 2016 static int complete_rpm(struct device *dev, void *data) 2017 { 2018 struct tb_switch *sw = tb_to_switch(dev); 2019 2020 if (sw) 2021 complete(&sw->rpm_complete); 2022 return 0; 2023 } 2024 2025 static void remove_unplugged_switch(struct tb_switch *sw) 2026 { 2027 struct device *parent = get_device(sw->dev.parent); 2028 2029 pm_runtime_get_sync(parent); 2030 2031 /* 2032 * Signal this and switches below for rpm_complete because 2033 * tb_switch_remove() calls pm_runtime_get_sync() that then waits 2034 * for it. 2035 */ 2036 complete_rpm(&sw->dev, NULL); 2037 bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); 2038 tb_switch_remove(sw); 2039 2040 pm_runtime_mark_last_busy(parent); 2041 pm_runtime_put_autosuspend(parent); 2042 2043 put_device(parent); 2044 } 2045 2046 static void icm_free_unplugged_children(struct tb_switch *sw) 2047 { 2048 struct tb_port *port; 2049 2050 tb_switch_for_each_port(sw, port) { 2051 if (port->xdomain && port->xdomain->is_unplugged) { 2052 tb_xdomain_remove(port->xdomain); 2053 port->xdomain = NULL; 2054 } else if (tb_port_has_remote(port)) { 2055 if (port->remote->sw->is_unplugged) { 2056 remove_unplugged_switch(port->remote->sw); 2057 port->remote = NULL; 2058 } else { 2059 icm_free_unplugged_children(port->remote->sw); 2060 } 2061 } 2062 } 2063 } 2064 2065 static void icm_rescan_work(struct work_struct *work) 2066 { 2067 struct icm *icm = container_of(work, struct icm, rescan_work.work); 2068 struct tb *tb = icm_to_tb(icm); 2069 2070 mutex_lock(&tb->lock); 2071 if (tb->root_switch) 2072 icm_free_unplugged_children(tb->root_switch); 2073 mutex_unlock(&tb->lock); 2074 } 2075 2076 static void icm_complete(struct tb *tb) 2077 { 2078 struct icm *icm = tb_priv(tb); 2079 2080 if (tb->nhi->going_away) 2081 return; 2082 2083 /* 2084 * If RTD3 was vetoed before we entered system suspend allow it 2085 * again now before driver ready is sent. Firmware sends a new RTD3 2086 * veto if it is still the case after we have sent it driver ready 2087 * command. 2088 */ 2089 icm_veto_end(tb); 2090 icm_unplug_children(tb->root_switch); 2091 2092 /* 2093 * Now all existing children should be resumed, start events 2094 * from ICM to get updated status. 2095 */ 2096 __icm_driver_ready(tb, NULL, NULL, NULL, NULL); 2097 2098 /* 2099 * We do not get notifications of devices that have been 2100 * unplugged during suspend so schedule rescan to clean them up 2101 * if any. 2102 */ 2103 queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); 2104 } 2105 2106 static int icm_runtime_suspend(struct tb *tb) 2107 { 2108 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); 2109 return 0; 2110 } 2111 2112 static int icm_runtime_suspend_switch(struct tb_switch *sw) 2113 { 2114 if (tb_route(sw)) 2115 reinit_completion(&sw->rpm_complete); 2116 return 0; 2117 } 2118 2119 static int icm_runtime_resume_switch(struct tb_switch *sw) 2120 { 2121 if (tb_route(sw)) { 2122 if (!wait_for_completion_timeout(&sw->rpm_complete, 2123 msecs_to_jiffies(500))) { 2124 dev_dbg(&sw->dev, "runtime resuming timed out\n"); 2125 } 2126 } 2127 return 0; 2128 } 2129 2130 static int icm_runtime_resume(struct tb *tb) 2131 { 2132 /* 2133 * We can reuse the same resume functionality than with system 2134 * suspend. 2135 */ 2136 icm_complete(tb); 2137 return 0; 2138 } 2139 2140 static int icm_start(struct tb *tb) 2141 { 2142 struct icm *icm = tb_priv(tb); 2143 int ret; 2144 2145 if (icm->safe_mode) 2146 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); 2147 else 2148 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 2149 if (IS_ERR(tb->root_switch)) 2150 return PTR_ERR(tb->root_switch); 2151 2152 tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; 2153 tb->root_switch->rpm = icm->rpm; 2154 2155 if (icm->set_uuid) 2156 icm->set_uuid(tb); 2157 2158 ret = tb_switch_add(tb->root_switch); 2159 if (ret) { 2160 tb_switch_put(tb->root_switch); 2161 tb->root_switch = NULL; 2162 } 2163 2164 return ret; 2165 } 2166 2167 static void icm_stop(struct tb *tb) 2168 { 2169 struct icm *icm = tb_priv(tb); 2170 2171 cancel_delayed_work(&icm->rescan_work); 2172 tb_switch_remove(tb->root_switch); 2173 tb->root_switch = NULL; 2174 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); 2175 kfree(icm->last_nvm_auth); 2176 icm->last_nvm_auth = NULL; 2177 } 2178 2179 static int icm_disconnect_pcie_paths(struct tb *tb) 2180 { 2181 return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); 2182 } 2183 2184 static void icm_usb4_switch_nvm_auth_complete(void *data) 2185 { 2186 struct usb4_switch_nvm_auth *auth = data; 2187 struct icm *icm = auth->icm; 2188 struct tb *tb = icm_to_tb(icm); 2189 2190 tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n", 2191 get_route(auth->reply.route_hi, auth->reply.route_lo), 2192 auth->reply.hdr.flags, auth->reply.status); 2193 2194 mutex_lock(&tb->lock); 2195 if (WARN_ON(icm->last_nvm_auth)) 2196 kfree(icm->last_nvm_auth); 2197 icm->last_nvm_auth = auth; 2198 mutex_unlock(&tb->lock); 2199 } 2200 2201 static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route) 2202 { 2203 struct usb4_switch_nvm_auth *auth; 2204 struct icm *icm = tb_priv(tb); 2205 struct tb_cfg_request *req; 2206 int ret; 2207 2208 auth = kzalloc(sizeof(*auth), GFP_KERNEL); 2209 if (!auth) 2210 return -ENOMEM; 2211 2212 auth->icm = icm; 2213 auth->request.hdr.code = ICM_USB4_SWITCH_OP; 2214 auth->request.route_hi = upper_32_bits(route); 2215 auth->request.route_lo = lower_32_bits(route); 2216 auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH; 2217 2218 req = tb_cfg_request_alloc(); 2219 if (!req) { 2220 ret = -ENOMEM; 2221 goto err_free_auth; 2222 } 2223 2224 req->match = icm_match; 2225 req->copy = icm_copy; 2226 req->request = &auth->request; 2227 req->request_size = sizeof(auth->request); 2228 req->request_type = TB_CFG_PKG_ICM_CMD; 2229 req->response = &auth->reply; 2230 req->npackets = 1; 2231 req->response_size = sizeof(auth->reply); 2232 req->response_type = TB_CFG_PKG_ICM_RESP; 2233 2234 tb_dbg(tb, "NVM_AUTH request for %llx\n", route); 2235 2236 mutex_lock(&icm->request_lock); 2237 ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete, 2238 auth); 2239 mutex_unlock(&icm->request_lock); 2240 2241 tb_cfg_request_put(req); 2242 if (ret) 2243 goto err_free_auth; 2244 return 0; 2245 2246 err_free_auth: 2247 kfree(auth); 2248 return ret; 2249 } 2250 2251 static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, 2252 u8 *status, const void *tx_data, size_t tx_data_len, 2253 void *rx_data, size_t rx_data_len) 2254 { 2255 struct icm_usb4_switch_op_response reply; 2256 struct icm_usb4_switch_op request; 2257 struct tb *tb = sw->tb; 2258 struct icm *icm = tb_priv(tb); 2259 u64 route = tb_route(sw); 2260 int ret; 2261 2262 /* 2263 * USB4 router operation proxy is supported in firmware if the 2264 * protocol version is 3 or higher. 2265 */ 2266 if (icm->proto_version < 3) 2267 return -EOPNOTSUPP; 2268 2269 /* 2270 * NVM_AUTH is a special USB4 proxy operation that does not 2271 * return immediately so handle it separately. 2272 */ 2273 if (opcode == USB4_SWITCH_OP_NVM_AUTH) 2274 return icm_usb4_switch_nvm_authenticate(tb, route); 2275 2276 memset(&request, 0, sizeof(request)); 2277 request.hdr.code = ICM_USB4_SWITCH_OP; 2278 request.route_hi = upper_32_bits(route); 2279 request.route_lo = lower_32_bits(route); 2280 request.opcode = opcode; 2281 if (metadata) 2282 request.metadata = *metadata; 2283 2284 if (tx_data_len) { 2285 request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID; 2286 if (tx_data_len < ARRAY_SIZE(request.data)) 2287 request.data_len_valid = 2288 tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK; 2289 memcpy(request.data, tx_data, tx_data_len * sizeof(u32)); 2290 } 2291 2292 memset(&reply, 0, sizeof(reply)); 2293 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 2294 1, ICM_TIMEOUT); 2295 if (ret) 2296 return ret; 2297 2298 if (reply.hdr.flags & ICM_FLAGS_ERROR) 2299 return -EIO; 2300 2301 if (status) 2302 *status = reply.status; 2303 2304 if (metadata) 2305 *metadata = reply.metadata; 2306 2307 if (rx_data_len) 2308 memcpy(rx_data, reply.data, rx_data_len * sizeof(u32)); 2309 2310 return 0; 2311 } 2312 2313 static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw, 2314 u32 *status) 2315 { 2316 struct usb4_switch_nvm_auth *auth; 2317 struct tb *tb = sw->tb; 2318 struct icm *icm = tb_priv(tb); 2319 int ret = 0; 2320 2321 if (icm->proto_version < 3) 2322 return -EOPNOTSUPP; 2323 2324 auth = icm->last_nvm_auth; 2325 icm->last_nvm_auth = NULL; 2326 2327 if (auth && auth->reply.route_hi == sw->config.route_hi && 2328 auth->reply.route_lo == sw->config.route_lo) { 2329 tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n", 2330 tb_route(sw), auth->reply.hdr.flags, auth->reply.status); 2331 if (auth->reply.hdr.flags & ICM_FLAGS_ERROR) 2332 ret = -EIO; 2333 else 2334 *status = auth->reply.status; 2335 } else { 2336 *status = 0; 2337 } 2338 2339 kfree(auth); 2340 return ret; 2341 } 2342 2343 /* Falcon Ridge */ 2344 static const struct tb_cm_ops icm_fr_ops = { 2345 .driver_ready = icm_driver_ready, 2346 .start = icm_start, 2347 .stop = icm_stop, 2348 .suspend = icm_suspend, 2349 .complete = icm_complete, 2350 .handle_event = icm_handle_event, 2351 .approve_switch = icm_fr_approve_switch, 2352 .add_switch_key = icm_fr_add_switch_key, 2353 .challenge_switch_key = icm_fr_challenge_switch_key, 2354 .disconnect_pcie_paths = icm_disconnect_pcie_paths, 2355 .approve_xdomain_paths = icm_fr_approve_xdomain_paths, 2356 .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, 2357 }; 2358 2359 /* Alpine Ridge */ 2360 static const struct tb_cm_ops icm_ar_ops = { 2361 .driver_ready = icm_driver_ready, 2362 .start = icm_start, 2363 .stop = icm_stop, 2364 .suspend = icm_suspend, 2365 .complete = icm_complete, 2366 .runtime_suspend = icm_runtime_suspend, 2367 .runtime_resume = icm_runtime_resume, 2368 .runtime_suspend_switch = icm_runtime_suspend_switch, 2369 .runtime_resume_switch = icm_runtime_resume_switch, 2370 .handle_event = icm_handle_event, 2371 .get_boot_acl = icm_ar_get_boot_acl, 2372 .set_boot_acl = icm_ar_set_boot_acl, 2373 .approve_switch = icm_fr_approve_switch, 2374 .add_switch_key = icm_fr_add_switch_key, 2375 .challenge_switch_key = icm_fr_challenge_switch_key, 2376 .disconnect_pcie_paths = icm_disconnect_pcie_paths, 2377 .approve_xdomain_paths = icm_fr_approve_xdomain_paths, 2378 .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, 2379 }; 2380 2381 /* Titan Ridge */ 2382 static const struct tb_cm_ops icm_tr_ops = { 2383 .driver_ready = icm_driver_ready, 2384 .start = icm_start, 2385 .stop = icm_stop, 2386 .suspend = icm_suspend, 2387 .complete = icm_complete, 2388 .runtime_suspend = icm_runtime_suspend, 2389 .runtime_resume = icm_runtime_resume, 2390 .runtime_suspend_switch = icm_runtime_suspend_switch, 2391 .runtime_resume_switch = icm_runtime_resume_switch, 2392 .handle_event = icm_handle_event, 2393 .get_boot_acl = icm_ar_get_boot_acl, 2394 .set_boot_acl = icm_ar_set_boot_acl, 2395 .approve_switch = icm_tr_approve_switch, 2396 .add_switch_key = icm_tr_add_switch_key, 2397 .challenge_switch_key = icm_tr_challenge_switch_key, 2398 .disconnect_pcie_paths = icm_disconnect_pcie_paths, 2399 .approve_xdomain_paths = icm_tr_approve_xdomain_paths, 2400 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, 2401 .usb4_switch_op = icm_usb4_switch_op, 2402 .usb4_switch_nvm_authenticate_status = 2403 icm_usb4_switch_nvm_authenticate_status, 2404 }; 2405 2406 /* Ice Lake */ 2407 static const struct tb_cm_ops icm_icl_ops = { 2408 .driver_ready = icm_driver_ready, 2409 .start = icm_start, 2410 .stop = icm_stop, 2411 .complete = icm_complete, 2412 .runtime_suspend = icm_runtime_suspend, 2413 .runtime_resume = icm_runtime_resume, 2414 .handle_event = icm_handle_event, 2415 .approve_xdomain_paths = icm_tr_approve_xdomain_paths, 2416 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, 2417 .usb4_switch_op = icm_usb4_switch_op, 2418 .usb4_switch_nvm_authenticate_status = 2419 icm_usb4_switch_nvm_authenticate_status, 2420 }; 2421 2422 struct tb *icm_probe(struct tb_nhi *nhi) 2423 { 2424 struct icm *icm; 2425 struct tb *tb; 2426 2427 tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm)); 2428 if (!tb) 2429 return NULL; 2430 2431 icm = tb_priv(tb); 2432 INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); 2433 mutex_init(&icm->request_lock); 2434 2435 switch (nhi->pdev->device) { 2436 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: 2437 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: 2438 icm->can_upgrade_nvm = true; 2439 icm->is_supported = icm_fr_is_supported; 2440 icm->get_route = icm_fr_get_route; 2441 icm->save_devices = icm_fr_save_devices; 2442 icm->driver_ready = icm_fr_driver_ready; 2443 icm->device_connected = icm_fr_device_connected; 2444 icm->device_disconnected = icm_fr_device_disconnected; 2445 icm->xdomain_connected = icm_fr_xdomain_connected; 2446 icm->xdomain_disconnected = icm_fr_xdomain_disconnected; 2447 tb->cm_ops = &icm_fr_ops; 2448 break; 2449 2450 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI: 2451 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI: 2452 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI: 2453 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: 2454 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: 2455 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; 2456 /* 2457 * NVM upgrade has not been tested on Apple systems and 2458 * they don't provide images publicly either. To be on 2459 * the safe side prevent root switch NVM upgrade on Macs 2460 * for now. 2461 */ 2462 icm->can_upgrade_nvm = !x86_apple_machine; 2463 icm->is_supported = icm_ar_is_supported; 2464 icm->cio_reset = icm_ar_cio_reset; 2465 icm->get_mode = icm_ar_get_mode; 2466 icm->get_route = icm_ar_get_route; 2467 icm->save_devices = icm_fr_save_devices; 2468 icm->driver_ready = icm_ar_driver_ready; 2469 icm->device_connected = icm_fr_device_connected; 2470 icm->device_disconnected = icm_fr_device_disconnected; 2471 icm->xdomain_connected = icm_fr_xdomain_connected; 2472 icm->xdomain_disconnected = icm_fr_xdomain_disconnected; 2473 tb->cm_ops = &icm_ar_ops; 2474 break; 2475 2476 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: 2477 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: 2478 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; 2479 icm->can_upgrade_nvm = !x86_apple_machine; 2480 icm->is_supported = icm_ar_is_supported; 2481 icm->cio_reset = icm_tr_cio_reset; 2482 icm->get_mode = icm_ar_get_mode; 2483 icm->driver_ready = icm_tr_driver_ready; 2484 icm->device_connected = icm_tr_device_connected; 2485 icm->device_disconnected = icm_tr_device_disconnected; 2486 icm->xdomain_connected = icm_tr_xdomain_connected; 2487 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 2488 tb->cm_ops = &icm_tr_ops; 2489 break; 2490 2491 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 2492 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 2493 icm->is_supported = icm_fr_is_supported; 2494 icm->driver_ready = icm_icl_driver_ready; 2495 icm->set_uuid = icm_icl_set_uuid; 2496 icm->device_connected = icm_icl_device_connected; 2497 icm->device_disconnected = icm_tr_device_disconnected; 2498 icm->xdomain_connected = icm_tr_xdomain_connected; 2499 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 2500 icm->rtd3_veto = icm_icl_rtd3_veto; 2501 tb->cm_ops = &icm_icl_ops; 2502 break; 2503 2504 case PCI_DEVICE_ID_INTEL_TGL_NHI0: 2505 case PCI_DEVICE_ID_INTEL_TGL_NHI1: 2506 case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: 2507 case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: 2508 icm->is_supported = icm_tgl_is_supported; 2509 icm->driver_ready = icm_icl_driver_ready; 2510 icm->set_uuid = icm_icl_set_uuid; 2511 icm->device_connected = icm_icl_device_connected; 2512 icm->device_disconnected = icm_tr_device_disconnected; 2513 icm->xdomain_connected = icm_tr_xdomain_connected; 2514 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 2515 icm->rtd3_veto = icm_icl_rtd3_veto; 2516 tb->cm_ops = &icm_icl_ops; 2517 break; 2518 2519 case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: 2520 icm->is_supported = icm_tgl_is_supported; 2521 icm->get_mode = icm_ar_get_mode; 2522 icm->driver_ready = icm_tr_driver_ready; 2523 icm->device_connected = icm_tr_device_connected; 2524 icm->device_disconnected = icm_tr_device_disconnected; 2525 icm->xdomain_connected = icm_tr_xdomain_connected; 2526 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 2527 tb->cm_ops = &icm_tr_ops; 2528 break; 2529 } 2530 2531 if (!icm->is_supported || !icm->is_supported(tb)) { 2532 dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n"); 2533 tb_domain_put(tb); 2534 return NULL; 2535 } 2536 2537 tb_dbg(tb, "using firmware connection manager\n"); 2538 2539 return tb; 2540 } 2541