/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. Only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @driver_ready: Send driver ready message to ICM
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	size_t max_boot_acl;
	int vnd_cap;
	bool safe_mode;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	int (*driver_ready)(struct tb *tb,
			    enum tb_security_level *security_level,
			    size_t *nboot_acl);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};

static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	u8 link;

	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);
}

static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

static inline u64 get_parent_route(u64 route)
{
	int depth = tb_route_length(route);
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

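/*
 * A route string packs one port number per hop, TB_ROUTE_SHIFT (8) bits
 * each, with deeper hops in higher bytes. A worked example of the
 * helpers above (illustrative values only):
 *
 *	route = 0x030201 (depth 3, hops via ports 1, 2 and 3)
 *	phy_port_from_route(route, 3)	== tb_phy_port_from_link(0x03)
 *	get_parent_route(route)		== 0x0201
 *
 * dual_link_from_link() returns the other link of a dual-link pair
 * (1 <-> 2, 3 <-> 4) and 0 for "no link".
 */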
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}

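/*
 * icm_request() below implements the usual ICM message exchange: one
 * request is sent and the response may span several frames. icm_match()
 * pairs a response with the pending request by packet type and message
 * code, and icm_copy() places each frame at packet_id * response_size
 * in the response buffer, returning true once the final packet
 * (total_packets - 1) has arrived. The only multi-packet response used
 * in this file is ICM_GET_TOPOLOGY, which is spread over
 * ICM_GET_TOPOLOGY_PACKETS frames.
 */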
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}

static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}

static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];
	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl)
{
	struct icm_fr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

	return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

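/*
 * In the "secure" security level the first connect of a device stores a
 * key on it (icm_fr_add_switch_key() above); on later connects the
 * device is challenged instead: the caller sends a random challenge
 * through the function below and checks the returned response against
 * the expected one. ICM_FLAGS_NO_KEY in the reply means no key has been
 * stored on the device yet.
 */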
static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	u8 phy_port;
	u8 cmd;

	phy_port = tb_phy_port_from_link(xd->link);
	if (phy_port == 0)
		cmd = NHI_MAILBOX_DISCONNECT_PA;
	else
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
	return 0;
}

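/*
 * The helpers below keep the in-memory topology in sync with events
 * coming from ICM: add_switch()/update_switch()/remove_switch() manage
 * devices, and the *_xdomain() variants do the same for host-to-host
 * (XDomain) connections hanging off a switch port.
 */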
static void add_switch(struct tb_switch *parent_sw, u64 route,
		       const uuid_t *uuid, u8 connection_id, u8 connection_key,
		       u8 link, u8 depth, enum tb_security_level security_level,
		       bool authorized, bool boot)
{
	struct tb_switch *sw;

	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
	if (!sw)
		return;

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->authorized = authorized;
	sw->security_level = security_level;
	sw->boot = boot;

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	if (tb_switch_add(sw)) {
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
		tb_switch_put(sw);
		return;
	}
}

static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
			  u64 route, u8 connection_id, u8 connection_key,
			  u8 link, u8 depth, bool boot)
{
	/* Disconnect from parent */
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->boot = boot;

	/* This switch still exists */
	sw->is_unplugged = false;
}

static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		return;

	xd->link = link;
	xd->depth = depth;

	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}

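/*
 * Rough sequence of icm_fr_device_connected() below: bail out if ICM
 * rejected the device, resolve the route string for the link/depth
 * pair, then reconcile with existing bookkeeping. A switch found by
 * UUID is updated in place when it is still at the same position,
 * otherwise removed; stale switches or XDomain connections occupying
 * the same link/depth (including the dual-link sibling) are removed
 * before the new switch is finally added under its parent.
 */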
static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;
	u8 link, depth;
	bool boot;
	u64 route;
	int ret;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
			link, depth);
		return;
	}

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
		phy_port = phy_port_from_route(route, depth);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that still are present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our bookkeeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			update_switch(parent_sw, sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
		   pkg->connection_key, link, depth, security_level,
		   authorized, boot);

	tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

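/*
 * XDomain events describe connections to another Thunderbolt host
 * (domain) rather than to a device. They carry both sides' UUIDs and
 * the local route string, and end up as tb_xdomain objects attached to
 * the port facing the other host.
 */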
static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u8 link, depth;
	bool approved;
	u64 route;

	/*
	 * After an NVM upgrade adding the root switch device fails
	 * because we initiated a reset. During that time ICM might
	 * still send XDomain connected messages which we ignore here.
	 */
	if (!tb->root_switch)
		return;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	approved = pkg->link_info & ICM_LINK_INFO_APPROVED;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		/*
		 * If we find an existing XDomain connection remove it
		 * now. We need to go through login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * Check if there already exists an XDomain at the same place
	 * as the new one and if so remove it, because it is most
	 * likely another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
		remove_switch(sw);

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection is through one or multiple devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

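/*
 * The icm_tr_* functions below are the Titan Ridge counterparts of the
 * icm_fr_* ones above. The main protocol difference is addressing:
 * Titan Ridge messages carry full route strings (route_hi/route_lo)
 * instead of the link/depth pairs used by Falcon Ridge, so no separate
 * route lookup is needed.
 */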
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 20000);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
	if (nboot_acl)
		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
			     ICM_TR_INFO_BOOT_ACL_SHIFT;
	return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_approve_device request;
	struct icm_tr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_add_device_key_response reply;
	struct icm_tr_pkg_add_device_key request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_tr_pkg_challenge_device_response reply;
	struct icm_tr_pkg_challenge_device request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
				    int stage)
{
	struct icm_tr_pkg_disconnect_xdomain_response reply;
	struct icm_tr_pkg_disconnect_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
	request.stage = stage;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	int ret;

	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
	if (ret)
		return ret;

	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_connected *pkg =
		(const struct icm_tr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct tb_xdomain *xd;
	bool authorized, boot;
	u64 route;

	/*
	 * Currently we don't use the QoS information coming with the
	 * device connected message so just ignore that extra packet for
	 * now.
	 */
	if (pkg->hdr.packet_id)
		return;

	/*
	 * After an NVM upgrade adding the root switch device fails
	 * because we initiated a reset. During that time ICM might
	 * still send device connected messages which we ignore here.
	 */
	if (!tb->root_switch)
		return;

	route = get_route(pkg->route_hi, pkg->route_lo);
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
			route);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		/* Update the switch if it is still in the same place */
		if (tb_route(sw) == route && !!sw->authorized == authorized) {
			parent_sw = tb_to_switch(sw->dev.parent);
			update_switch(parent_sw, sw, route, pkg->connection_id,
				      0, 0, 0, boot);
			tb_switch_put(sw);
			return;
		}

		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Another switch with the same address */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* XDomain connection with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %llx\n", route);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
		   0, 0, 0, security_level, authorized, boot);

	tb_switch_put(parent_sw);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_disconnected *pkg =
		(const struct icm_tr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	sw = tb_switch_find_by_route(tb, route);
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_connected *pkg =
		(const struct icm_tr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u64 route;

	if (!tb->root_switch)
		return;

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		if (xd->route == route) {
			update_xdomain(xd, route, 0);
			tb_xdomain_put(xd);
			return;
		}

		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/* An existing xdomain with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
		remove_switch(sw);

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_disconnected *pkg =
		(const struct icm_tr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

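/*
 * On Apple hardware the boot firmware leaves ICM disabled, so the
 * driver starts it manually (see icm_firmware_reset() below). That is
 * done through a PCIe2CIO mailbox found in a vendor specific capability
 * of the Thunderbolt upstream PCIe bridge, which get_upstream_port()
 * locates by walking up the PCI hierarchy.
 */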
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!x86_apple_machine)
		return true;

	/*
	 * Find the upstream PCIe port in case we need to do reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 60;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(50);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl)
{
	struct icm_ar_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
			     ICM_AR_INFO_BOOT_ACL_SHIFT;
	return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = { .code = ICM_PREBOOT_ACL },
	};
	int ret, i;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	for (i = 0; i < nuuids; i++) {
		u32 *uuid = (u32 *)&uuids[i];

		uuid[0] = reply.acl[i].uuid_lo;
		uuid[1] = reply.acl[i].uuid_hi;

		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
			/* Map empty entries to null UUID */
			uuid[0] = 0;
			uuid[1] = 0;
		} else {
			/* Upper two DWs are always ones */
			uuid[2] = 0xffffffff;
			uuid[3] = 0xffffffff;
		}
	}

	return ret;
}

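/*
 * ICM stores only the low 64 bits of each preboot ACL UUID; the high
 * two DWs of a valid entry are always all ones, and an all ones low
 * part marks an empty slot. icm_ar_get_boot_acl() above and
 * icm_ar_set_boot_acl() below translate between that encoding and
 * normal uuid_t values, mapping empty slots to/from the null UUID.
 */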
static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
			       size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = {
			.code = ICM_PREBOOT_ACL,
			.flags = ICM_FLAGS_WRITE,
		},
	};
	int ret, i;

	for (i = 0; i < nuuids; i++) {
		const u32 *uuid = (const u32 *)&uuids[i];

		if (uuid_is_null(&uuids[i])) {
			/*
			 * Map null UUID to the empty (all ones) entries
			 * for ICM.
			 */
			request.acl[i].uuid_lo = 0xffffffff;
			request.acl[i].uuid_hi = 0xffffffff;
		} else {
			/* Two high DWs need to be set to all ones */
			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
				return -EINVAL;

			request.acl[i].uuid_lo = uuid[0];
			request.acl[i].uuid_hi = uuid[1];
		}
	}

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	switch (n->pkg->code) {
	case ICM_EVENT_DEVICE_CONNECTED:
		icm->device_connected(tb, n->pkg);
		break;
	case ICM_EVENT_DEVICE_DISCONNECTED:
		icm->device_disconnected(tb, n->pkg);
		break;
	case ICM_EVENT_XDOMAIN_CONNECTED:
		icm->xdomain_connected(tb, n->pkg);
		break;
	case ICM_EVENT_XDOMAIN_DISCONNECTED:
		icm->xdomain_disconnected(tb, n->pkg);
		break;
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}

static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	/* Bail out instead of dereferencing a NULL packet in the work */
	if (!n->pkg) {
		kfree(n);
		return;
	}
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		   size_t *nboot_acl)
{
	struct icm *icm = tb_priv(tb);
	unsigned int retries = 50;
	int ret;

	ret = icm->driver_ready(tb, security_level, nboot_acl);
	if (ret) {
		tb_err(tb, "failed to send driver ready to ICM\n");
		return ret;
	}

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	tb_err(tb, "failed to read root switch config space, giving up\n");
	return -ETIMEDOUT;
}

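/*
 * The PCIe2CIO mailbox used below allows config space access to the
 * Thunderbolt fabric through the upstream bridge before ICM is running:
 * for a write, the data goes to PCIE2CIO_WRDATA and a command word
 * encoding the config space, port and register index is written to
 * PCIE2CIO_CMD with PCIE2CIO_CMD_START (plus PCIE2CIO_CMD_WRITE) set.
 * Completion is signalled by the hardware clearing the start bit;
 * PCIE2CIO_CMD_TIMEOUT indicates failure. Reads work the same way, with
 * the result fetched from PCIE2CIO_RDDATA.
 */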
static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	if (!icm->upstream_port)
		return -ENODEV;

	/* Put ARC to wait for CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}

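/*
 * Each physical port consists of two Thunderbolt null ports: ports 1
 * and 2 belong to physical port 0 and ports 3 and 4 to physical port 1.
 * icm_reset_phy_port() below disables and then re-enables both links of
 * one physical port, but only when both of them are already up.
 */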
static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

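/*
 * icm_firmware_init() brings the firmware to a usable state: it starts
 * ICM if needed, queries the firmware mode (flagging safe mode, and in
 * connection manager mode asking ICM to accept all devices), and then
 * resets both physical ports so that anything plugged in while ICM was
 * not running gets re-enumerated by the now running firmware.
 */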
static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			if (ret < 0)
				return ret;

			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}

static int icm_driver_ready(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	ret = icm_firmware_init(tb);
	if (ret)
		return ret;

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
		return 0;
	}

	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl);
	if (ret)
		return ret;

	/*
	 * Make sure the number of supported preboot ACL entries matches
	 * what we expect or disable the whole feature.
	 */
	if (tb->nboot_acl > icm->max_boot_acl)
		tb->nboot_acl = 0;

	return 0;
}

static int icm_suspend(struct tb *tb)
{
	int ret;

	ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
	if (ret)
		tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
			ret, __func__);

	return 0;
}

/*
 * Mark all switches (except the root switch) below this one unplugged.
 * ICM firmware will send us an updated list of switches after we have
 * sent it the driver ready command. If a switch is not in that list it
 * will be removed when we perform rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
	unsigned int i;

	if (tb_route(sw))
		sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			continue;
		}
		if (!port->remote)
			continue;

		icm_unplug_children(port->remote->sw);
	}
}

static void icm_free_unplugged_children(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			continue;
		}

		if (!port->remote)
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			icm_free_unplugged_children(port->remote->sw);
		}
	}
}

static void icm_rescan_work(struct work_struct *work)
{
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
}

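/*
 * icm_complete() below runs on resume: children are first marked
 * unplugged, then the driver ready handshake is redone so that ICM
 * re-sends connected events for devices that survived the sleep (which
 * clears the flag again via update_switch()). A rescan scheduled 500 ms
 * later removes whatever is still marked unplugged.
 */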
static void icm_complete(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)
		return;

	icm_unplug_children(tb->root_switch);

	/*
	 * Now all existing children should be resumed, start events
	 * from ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL, NULL);

	/*
	 * We do not get notifications of devices that have been
	 * unplugged during suspend so schedule rescan to clean them up
	 * if any.
	 */
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_start(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	if (icm->safe_mode)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = x86_apple_machine;

	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		tb->root_switch = NULL;
	}

	return ret;
}

static void icm_stop(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge */
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Alpine Ridge */
static const struct tb_cm_ops icm_ar_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Titan Ridge */
static const struct tb_cm_ops icm_tr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_tr_approve_switch,
	.add_switch_key = icm_tr_add_switch_key,
	.challenge_switch_key = icm_tr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
};

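/*
 * icm_probe() picks the right set of callbacks based on the NHI PCI ID:
 * Falcon Ridge resolves routes through ICM_GET_TOPOLOGY, Alpine Ridge
 * adds ICM_GET_ROUTE, preboot ACL support and the manual firmware start
 * path, and Titan Ridge needs no get_route callback at all since its
 * events already carry route strings.
 */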
struct tb *icm_probe(struct tb_nhi *nhi)
{
	struct icm *icm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct icm));
	if (!tb)
		return NULL;

	icm = tb_priv(tb);
	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->driver_ready = icm_fr_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->driver_ready = icm_ar_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_ar_ops;
		break;

	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->driver_ready = icm_tr_driver_ready;
		icm->device_connected = icm_tr_device_connected;
		icm->device_disconnected = icm_tr_device_disconnected;
		icm->xdomain_connected = icm_tr_xdomain_connected;
		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
		tb->cm_ops = &icm_tr_ops;
		break;
	}

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
		tb_domain_put(tb);
		return NULL;
	}

	return tb;
}