1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Internal Thunderbolt Connection Manager. This is a firmware running on 4 * the Thunderbolt host controller performing most of the low-level 5 * handling. 6 * 7 * Copyright (C) 2017, Intel Corporation 8 * Authors: Michael Jamet <michael.jamet@intel.com> 9 * Mika Westerberg <mika.westerberg@linux.intel.com> 10 */ 11 12 #include <linux/delay.h> 13 #include <linux/mutex.h> 14 #include <linux/pci.h> 15 #include <linux/pm_runtime.h> 16 #include <linux/platform_data/x86/apple.h> 17 #include <linux/sizes.h> 18 #include <linux/slab.h> 19 #include <linux/workqueue.h> 20 21 #include "ctl.h" 22 #include "nhi_regs.h" 23 #include "tb.h" 24 25 #define PCIE2CIO_CMD 0x30 26 #define PCIE2CIO_CMD_TIMEOUT BIT(31) 27 #define PCIE2CIO_CMD_START BIT(30) 28 #define PCIE2CIO_CMD_WRITE BIT(21) 29 #define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19) 30 #define PCIE2CIO_CMD_CS_SHIFT 19 31 #define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13) 32 #define PCIE2CIO_CMD_PORT_SHIFT 13 33 34 #define PCIE2CIO_WRDATA 0x34 35 #define PCIE2CIO_RDDATA 0x38 36 37 #define PHY_PORT_CS1 0x37 38 #define PHY_PORT_CS1_LINK_DISABLE BIT(14) 39 #define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26) 40 #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 41 42 #define ICM_TIMEOUT 5000 /* ms */ 43 #define ICM_APPROVE_TIMEOUT 10000 /* ms */ 44 #define ICM_MAX_LINK 4 45 46 /** 47 * struct icm - Internal connection manager private data 48 * @request_lock: Makes sure only one message is send to ICM at time 49 * @rescan_work: Work used to rescan the surviving switches after resume 50 * @upstream_port: Pointer to the PCIe upstream port this host 51 * controller is connected. This is only set for systems 52 * where ICM needs to be started manually 53 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides 54 * (only set when @upstream_port is not %NULL) 55 * @safe_mode: ICM is in safe mode 56 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) 57 * @rpm: Does the controller support runtime PM (RTD3) 58 * @is_supported: Checks if we can support ICM on this controller 59 * @get_mode: Read and return the ICM firmware mode (optional) 60 * @get_route: Find a route string for given switch 61 * @save_devices: Ask ICM to save devices to ACL when suspending (optional) 62 * @driver_ready: Send driver ready message to ICM 63 * @device_connected: Handle device connected ICM message 64 * @device_disconnected: Handle device disconnected ICM message 65 * @xdomain_connected - Handle XDomain connected ICM message 66 * @xdomain_disconnected - Handle XDomain disconnected ICM message 67 */ 68 struct icm { 69 struct mutex request_lock; 70 struct delayed_work rescan_work; 71 struct pci_dev *upstream_port; 72 size_t max_boot_acl; 73 int vnd_cap; 74 bool safe_mode; 75 bool rpm; 76 bool (*is_supported)(struct tb *tb); 77 int (*get_mode)(struct tb *tb); 78 int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); 79 void (*save_devices)(struct tb *tb); 80 int (*driver_ready)(struct tb *tb, 81 enum tb_security_level *security_level, 82 size_t *nboot_acl, bool *rpm); 83 void (*device_connected)(struct tb *tb, 84 const struct icm_pkg_header *hdr); 85 void (*device_disconnected)(struct tb *tb, 86 const struct icm_pkg_header *hdr); 87 void (*xdomain_connected)(struct tb *tb, 88 const struct icm_pkg_header *hdr); 89 void (*xdomain_disconnected)(struct tb *tb, 90 const struct icm_pkg_header *hdr); 91 }; 92 93 struct icm_notification { 94 struct work_struct work; 95 struct icm_pkg_header *pkg; 96 struct tb *tb; 97 }; 98 99 struct ep_name_entry { 100 u8 len; 101 u8 type; 102 u8 data[0]; 103 }; 104 105 #define EP_NAME_INTEL_VSS 0x10 106 107 /* Intel Vendor specific structure */ 108 struct intel_vss { 109 u16 vendor; 110 u16 model; 111 u8 mc; 112 u8 flags; 113 u16 pci_devid; 114 u32 nvm_version; 115 }; 116 117 #define INTEL_VSS_FLAGS_RTD3 BIT(0) 118 119 static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size) 120 { 121 const void *end = ep_name + size; 122 123 while (ep_name < end) { 124 const struct ep_name_entry *ep = ep_name; 125 126 if (!ep->len) 127 break; 128 if (ep_name + ep->len > end) 129 break; 130 131 if (ep->type == EP_NAME_INTEL_VSS) 132 return (const struct intel_vss *)ep->data; 133 134 ep_name += ep->len; 135 } 136 137 return NULL; 138 } 139 140 static inline struct tb *icm_to_tb(struct icm *icm) 141 { 142 return ((void *)icm - sizeof(struct tb)); 143 } 144 145 static inline u8 phy_port_from_route(u64 route, u8 depth) 146 { 147 u8 link; 148 149 link = depth ? route >> ((depth - 1) * 8) : route; 150 return tb_phy_port_from_link(link); 151 } 152 153 static inline u8 dual_link_from_link(u8 link) 154 { 155 return link ? ((link - 1) ^ 0x01) + 1 : 0; 156 } 157 158 static inline u64 get_route(u32 route_hi, u32 route_lo) 159 { 160 return (u64)route_hi << 32 | route_lo; 161 } 162 163 static inline u64 get_parent_route(u64 route) 164 { 165 int depth = tb_route_length(route); 166 return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; 167 } 168 169 static bool icm_match(const struct tb_cfg_request *req, 170 const struct ctl_pkg *pkg) 171 { 172 const struct icm_pkg_header *res_hdr = pkg->buffer; 173 const struct icm_pkg_header *req_hdr = req->request; 174 175 if (pkg->frame.eof != req->response_type) 176 return false; 177 if (res_hdr->code != req_hdr->code) 178 return false; 179 180 return true; 181 } 182 183 static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) 184 { 185 const struct icm_pkg_header *hdr = pkg->buffer; 186 187 if (hdr->packet_id < req->npackets) { 188 size_t offset = hdr->packet_id * req->response_size; 189 190 memcpy(req->response + offset, pkg->buffer, req->response_size); 191 } 192 193 return hdr->packet_id == hdr->total_packets - 1; 194 } 195 196 static int icm_request(struct tb *tb, const void *request, size_t request_size, 197 void *response, size_t response_size, size_t npackets, 198 unsigned int timeout_msec) 199 { 200 struct icm *icm = tb_priv(tb); 201 int retries = 3; 202 203 do { 204 struct tb_cfg_request *req; 205 struct tb_cfg_result res; 206 207 req = tb_cfg_request_alloc(); 208 if (!req) 209 return -ENOMEM; 210 211 req->match = icm_match; 212 req->copy = icm_copy; 213 req->request = request; 214 req->request_size = request_size; 215 req->request_type = TB_CFG_PKG_ICM_CMD; 216 req->response = response; 217 req->npackets = npackets; 218 req->response_size = response_size; 219 req->response_type = TB_CFG_PKG_ICM_RESP; 220 221 mutex_lock(&icm->request_lock); 222 res = tb_cfg_request_sync(tb->ctl, req, timeout_msec); 223 mutex_unlock(&icm->request_lock); 224 225 tb_cfg_request_put(req); 226 227 if (res.err != -ETIMEDOUT) 228 return res.err == 1 ? -EIO : res.err; 229 230 usleep_range(20, 50); 231 } while (retries--); 232 233 return -ETIMEDOUT; 234 } 235 236 static bool icm_fr_is_supported(struct tb *tb) 237 { 238 return !x86_apple_machine; 239 } 240 241 static inline int icm_fr_get_switch_index(u32 port) 242 { 243 int index; 244 245 if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT) 246 return 0; 247 248 index = port >> ICM_PORT_INDEX_SHIFT; 249 return index != 0xff ? index : 0; 250 } 251 252 static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) 253 { 254 struct icm_fr_pkg_get_topology_response *switches, *sw; 255 struct icm_fr_pkg_get_topology request = { 256 .hdr = { .code = ICM_GET_TOPOLOGY }, 257 }; 258 size_t npackets = ICM_GET_TOPOLOGY_PACKETS; 259 int ret, index; 260 u8 i; 261 262 switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL); 263 if (!switches) 264 return -ENOMEM; 265 266 ret = icm_request(tb, &request, sizeof(request), switches, 267 sizeof(*switches), npackets, ICM_TIMEOUT); 268 if (ret) 269 goto err_free; 270 271 sw = &switches[0]; 272 index = icm_fr_get_switch_index(sw->ports[link]); 273 if (!index) { 274 ret = -ENODEV; 275 goto err_free; 276 } 277 278 sw = &switches[index]; 279 for (i = 1; i < depth; i++) { 280 unsigned int j; 281 282 if (!(sw->first_data & ICM_SWITCH_USED)) { 283 ret = -ENODEV; 284 goto err_free; 285 } 286 287 for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { 288 index = icm_fr_get_switch_index(sw->ports[j]); 289 if (index > sw->switch_index) { 290 sw = &switches[index]; 291 break; 292 } 293 } 294 } 295 296 *route = get_route(sw->route_hi, sw->route_lo); 297 298 err_free: 299 kfree(switches); 300 return ret; 301 } 302 303 static void icm_fr_save_devices(struct tb *tb) 304 { 305 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); 306 } 307 308 static int 309 icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, 310 size_t *nboot_acl, bool *rpm) 311 { 312 struct icm_fr_pkg_driver_ready_response reply; 313 struct icm_pkg_driver_ready request = { 314 .hdr.code = ICM_DRIVER_READY, 315 }; 316 int ret; 317 318 memset(&reply, 0, sizeof(reply)); 319 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 320 1, ICM_TIMEOUT); 321 if (ret) 322 return ret; 323 324 if (security_level) 325 *security_level = reply.security_level & ICM_FR_SLEVEL_MASK; 326 327 return 0; 328 } 329 330 static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) 331 { 332 struct icm_fr_pkg_approve_device request; 333 struct icm_fr_pkg_approve_device reply; 334 int ret; 335 336 memset(&request, 0, sizeof(request)); 337 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 338 request.hdr.code = ICM_APPROVE_DEVICE; 339 request.connection_id = sw->connection_id; 340 request.connection_key = sw->connection_key; 341 342 memset(&reply, 0, sizeof(reply)); 343 /* Use larger timeout as establishing tunnels can take some time */ 344 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 345 1, ICM_APPROVE_TIMEOUT); 346 if (ret) 347 return ret; 348 349 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 350 tb_warn(tb, "PCIe tunnel creation failed\n"); 351 return -EIO; 352 } 353 354 return 0; 355 } 356 357 static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) 358 { 359 struct icm_fr_pkg_add_device_key request; 360 struct icm_fr_pkg_add_device_key_response reply; 361 int ret; 362 363 memset(&request, 0, sizeof(request)); 364 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 365 request.hdr.code = ICM_ADD_DEVICE_KEY; 366 request.connection_id = sw->connection_id; 367 request.connection_key = sw->connection_key; 368 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); 369 370 memset(&reply, 0, sizeof(reply)); 371 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 372 1, ICM_TIMEOUT); 373 if (ret) 374 return ret; 375 376 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 377 tb_warn(tb, "Adding key to switch failed\n"); 378 return -EIO; 379 } 380 381 return 0; 382 } 383 384 static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, 385 const u8 *challenge, u8 *response) 386 { 387 struct icm_fr_pkg_challenge_device request; 388 struct icm_fr_pkg_challenge_device_response reply; 389 int ret; 390 391 memset(&request, 0, sizeof(request)); 392 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 393 request.hdr.code = ICM_CHALLENGE_DEVICE; 394 request.connection_id = sw->connection_id; 395 request.connection_key = sw->connection_key; 396 memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); 397 398 memset(&reply, 0, sizeof(reply)); 399 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 400 1, ICM_TIMEOUT); 401 if (ret) 402 return ret; 403 404 if (reply.hdr.flags & ICM_FLAGS_ERROR) 405 return -EKEYREJECTED; 406 if (reply.hdr.flags & ICM_FLAGS_NO_KEY) 407 return -ENOKEY; 408 409 memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); 410 411 return 0; 412 } 413 414 static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) 415 { 416 struct icm_fr_pkg_approve_xdomain_response reply; 417 struct icm_fr_pkg_approve_xdomain request; 418 int ret; 419 420 memset(&request, 0, sizeof(request)); 421 request.hdr.code = ICM_APPROVE_XDOMAIN; 422 request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; 423 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); 424 425 request.transmit_path = xd->transmit_path; 426 request.transmit_ring = xd->transmit_ring; 427 request.receive_path = xd->receive_path; 428 request.receive_ring = xd->receive_ring; 429 430 memset(&reply, 0, sizeof(reply)); 431 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 432 1, ICM_TIMEOUT); 433 if (ret) 434 return ret; 435 436 if (reply.hdr.flags & ICM_FLAGS_ERROR) 437 return -EIO; 438 439 return 0; 440 } 441 442 static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) 443 { 444 u8 phy_port; 445 u8 cmd; 446 447 phy_port = tb_phy_port_from_link(xd->link); 448 if (phy_port == 0) 449 cmd = NHI_MAILBOX_DISCONNECT_PA; 450 else 451 cmd = NHI_MAILBOX_DISCONNECT_PB; 452 453 nhi_mailbox_cmd(tb->nhi, cmd, 1); 454 usleep_range(10, 50); 455 nhi_mailbox_cmd(tb->nhi, cmd, 2); 456 return 0; 457 } 458 459 static void add_switch(struct tb_switch *parent_sw, u64 route, 460 const uuid_t *uuid, const u8 *ep_name, 461 size_t ep_name_size, u8 connection_id, u8 connection_key, 462 u8 link, u8 depth, enum tb_security_level security_level, 463 bool authorized, bool boot) 464 { 465 const struct intel_vss *vss; 466 struct tb_switch *sw; 467 468 pm_runtime_get_sync(&parent_sw->dev); 469 470 sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); 471 if (IS_ERR(sw)) 472 goto out; 473 474 sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); 475 if (!sw->uuid) { 476 tb_sw_warn(sw, "cannot allocate memory for switch\n"); 477 tb_switch_put(sw); 478 goto out; 479 } 480 sw->connection_id = connection_id; 481 sw->connection_key = connection_key; 482 sw->link = link; 483 sw->depth = depth; 484 sw->authorized = authorized; 485 sw->security_level = security_level; 486 sw->boot = boot; 487 488 vss = parse_intel_vss(ep_name, ep_name_size); 489 if (vss) 490 sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3); 491 492 /* Link the two switches now */ 493 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 494 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); 495 496 if (tb_switch_add(sw)) { 497 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 498 tb_switch_put(sw); 499 } 500 501 out: 502 pm_runtime_mark_last_busy(&parent_sw->dev); 503 pm_runtime_put_autosuspend(&parent_sw->dev); 504 } 505 506 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, 507 u64 route, u8 connection_id, u8 connection_key, 508 u8 link, u8 depth, bool boot) 509 { 510 /* Disconnect from parent */ 511 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 512 /* Re-connect via updated port*/ 513 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 514 515 /* Update with the new addressing information */ 516 sw->config.route_hi = upper_32_bits(route); 517 sw->config.route_lo = lower_32_bits(route); 518 sw->connection_id = connection_id; 519 sw->connection_key = connection_key; 520 sw->link = link; 521 sw->depth = depth; 522 sw->boot = boot; 523 524 /* This switch still exists */ 525 sw->is_unplugged = false; 526 } 527 528 static void remove_switch(struct tb_switch *sw) 529 { 530 struct tb_switch *parent_sw; 531 532 parent_sw = tb_to_switch(sw->dev.parent); 533 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 534 tb_switch_remove(sw); 535 } 536 537 static void add_xdomain(struct tb_switch *sw, u64 route, 538 const uuid_t *local_uuid, const uuid_t *remote_uuid, 539 u8 link, u8 depth) 540 { 541 struct tb_xdomain *xd; 542 543 pm_runtime_get_sync(&sw->dev); 544 545 xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); 546 if (!xd) 547 goto out; 548 549 xd->link = link; 550 xd->depth = depth; 551 552 tb_port_at(route, sw)->xdomain = xd; 553 554 tb_xdomain_add(xd); 555 556 out: 557 pm_runtime_mark_last_busy(&sw->dev); 558 pm_runtime_put_autosuspend(&sw->dev); 559 } 560 561 static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) 562 { 563 xd->link = link; 564 xd->route = route; 565 xd->is_unplugged = false; 566 } 567 568 static void remove_xdomain(struct tb_xdomain *xd) 569 { 570 struct tb_switch *sw; 571 572 sw = tb_to_switch(xd->dev.parent); 573 tb_port_at(xd->route, sw)->xdomain = NULL; 574 tb_xdomain_remove(xd); 575 } 576 577 static void 578 icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 579 { 580 const struct icm_fr_event_device_connected *pkg = 581 (const struct icm_fr_event_device_connected *)hdr; 582 enum tb_security_level security_level; 583 struct tb_switch *sw, *parent_sw; 584 struct icm *icm = tb_priv(tb); 585 bool authorized = false; 586 struct tb_xdomain *xd; 587 u8 link, depth; 588 bool boot; 589 u64 route; 590 int ret; 591 592 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 593 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 594 ICM_LINK_INFO_DEPTH_SHIFT; 595 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; 596 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> 597 ICM_FLAGS_SLEVEL_SHIFT; 598 boot = pkg->link_info & ICM_LINK_INFO_BOOT; 599 600 if (pkg->link_info & ICM_LINK_INFO_REJECTED) { 601 tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", 602 link, depth); 603 return; 604 } 605 606 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); 607 if (sw) { 608 u8 phy_port, sw_phy_port; 609 610 parent_sw = tb_to_switch(sw->dev.parent); 611 sw_phy_port = tb_phy_port_from_link(sw->link); 612 phy_port = tb_phy_port_from_link(link); 613 614 /* 615 * On resume ICM will send us connected events for the 616 * devices that still are present. However, that 617 * information might have changed for example by the 618 * fact that a switch on a dual-link connection might 619 * have been enumerated using the other link now. Make 620 * sure our book keeping matches that. 621 */ 622 if (sw->depth == depth && sw_phy_port == phy_port && 623 !!sw->authorized == authorized) { 624 /* 625 * It was enumerated through another link so update 626 * route string accordingly. 627 */ 628 if (sw->link != link) { 629 ret = icm->get_route(tb, link, depth, &route); 630 if (ret) { 631 tb_err(tb, "failed to update route string for switch at %u.%u\n", 632 link, depth); 633 tb_switch_put(sw); 634 return; 635 } 636 } else { 637 route = tb_route(sw); 638 } 639 640 update_switch(parent_sw, sw, route, pkg->connection_id, 641 pkg->connection_key, link, depth, boot); 642 tb_switch_put(sw); 643 return; 644 } 645 646 /* 647 * User connected the same switch to another physical 648 * port or to another part of the topology. Remove the 649 * existing switch now before adding the new one. 650 */ 651 remove_switch(sw); 652 tb_switch_put(sw); 653 } 654 655 /* 656 * If the switch was not found by UUID, look for a switch on 657 * same physical port (taking possible link aggregation into 658 * account) and depth. If we found one it is definitely a stale 659 * one so remove it first. 660 */ 661 sw = tb_switch_find_by_link_depth(tb, link, depth); 662 if (!sw) { 663 u8 dual_link; 664 665 dual_link = dual_link_from_link(link); 666 if (dual_link) 667 sw = tb_switch_find_by_link_depth(tb, dual_link, depth); 668 } 669 if (sw) { 670 remove_switch(sw); 671 tb_switch_put(sw); 672 } 673 674 /* Remove existing XDomain connection if found */ 675 xd = tb_xdomain_find_by_link_depth(tb, link, depth); 676 if (xd) { 677 remove_xdomain(xd); 678 tb_xdomain_put(xd); 679 } 680 681 parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1); 682 if (!parent_sw) { 683 tb_err(tb, "failed to find parent switch for %u.%u\n", 684 link, depth); 685 return; 686 } 687 688 ret = icm->get_route(tb, link, depth, &route); 689 if (ret) { 690 tb_err(tb, "failed to find route string for switch at %u.%u\n", 691 link, depth); 692 tb_switch_put(parent_sw); 693 return; 694 } 695 696 add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, 697 sizeof(pkg->ep_name), pkg->connection_id, 698 pkg->connection_key, link, depth, security_level, 699 authorized, boot); 700 701 tb_switch_put(parent_sw); 702 } 703 704 static void 705 icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 706 { 707 const struct icm_fr_event_device_disconnected *pkg = 708 (const struct icm_fr_event_device_disconnected *)hdr; 709 struct tb_switch *sw; 710 u8 link, depth; 711 712 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 713 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 714 ICM_LINK_INFO_DEPTH_SHIFT; 715 716 if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { 717 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); 718 return; 719 } 720 721 sw = tb_switch_find_by_link_depth(tb, link, depth); 722 if (!sw) { 723 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, 724 depth); 725 return; 726 } 727 728 remove_switch(sw); 729 tb_switch_put(sw); 730 } 731 732 static void 733 icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) 734 { 735 const struct icm_fr_event_xdomain_connected *pkg = 736 (const struct icm_fr_event_xdomain_connected *)hdr; 737 struct tb_xdomain *xd; 738 struct tb_switch *sw; 739 u8 link, depth; 740 u64 route; 741 742 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 743 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 744 ICM_LINK_INFO_DEPTH_SHIFT; 745 746 if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { 747 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); 748 return; 749 } 750 751 route = get_route(pkg->local_route_hi, pkg->local_route_lo); 752 753 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); 754 if (xd) { 755 u8 xd_phy_port, phy_port; 756 757 xd_phy_port = phy_port_from_route(xd->route, xd->depth); 758 phy_port = phy_port_from_route(route, depth); 759 760 if (xd->depth == depth && xd_phy_port == phy_port) { 761 update_xdomain(xd, route, link); 762 tb_xdomain_put(xd); 763 return; 764 } 765 766 /* 767 * If we find an existing XDomain connection remove it 768 * now. We need to go through login handshake and 769 * everything anyway to be able to re-establish the 770 * connection. 771 */ 772 remove_xdomain(xd); 773 tb_xdomain_put(xd); 774 } 775 776 /* 777 * Look if there already exists an XDomain in the same place 778 * than the new one and in that case remove it because it is 779 * most likely another host that got disconnected. 780 */ 781 xd = tb_xdomain_find_by_link_depth(tb, link, depth); 782 if (!xd) { 783 u8 dual_link; 784 785 dual_link = dual_link_from_link(link); 786 if (dual_link) 787 xd = tb_xdomain_find_by_link_depth(tb, dual_link, 788 depth); 789 } 790 if (xd) { 791 remove_xdomain(xd); 792 tb_xdomain_put(xd); 793 } 794 795 /* 796 * If the user disconnected a switch during suspend and 797 * connected another host to the same port, remove the switch 798 * first. 799 */ 800 sw = tb_switch_find_by_route(tb, route); 801 if (sw) { 802 remove_switch(sw); 803 tb_switch_put(sw); 804 } 805 806 sw = tb_switch_find_by_link_depth(tb, link, depth); 807 if (!sw) { 808 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, 809 depth); 810 return; 811 } 812 813 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, 814 depth); 815 tb_switch_put(sw); 816 } 817 818 static void 819 icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 820 { 821 const struct icm_fr_event_xdomain_disconnected *pkg = 822 (const struct icm_fr_event_xdomain_disconnected *)hdr; 823 struct tb_xdomain *xd; 824 825 /* 826 * If the connection is through one or multiple devices, the 827 * XDomain device is removed along with them so it is fine if we 828 * cannot find it here. 829 */ 830 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); 831 if (xd) { 832 remove_xdomain(xd); 833 tb_xdomain_put(xd); 834 } 835 } 836 837 static int 838 icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, 839 size_t *nboot_acl, bool *rpm) 840 { 841 struct icm_tr_pkg_driver_ready_response reply; 842 struct icm_pkg_driver_ready request = { 843 .hdr.code = ICM_DRIVER_READY, 844 }; 845 int ret; 846 847 memset(&reply, 0, sizeof(reply)); 848 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 849 1, 20000); 850 if (ret) 851 return ret; 852 853 if (security_level) 854 *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; 855 if (nboot_acl) 856 *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> 857 ICM_TR_INFO_BOOT_ACL_SHIFT; 858 if (rpm) 859 *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3); 860 861 return 0; 862 } 863 864 static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) 865 { 866 struct icm_tr_pkg_approve_device request; 867 struct icm_tr_pkg_approve_device reply; 868 int ret; 869 870 memset(&request, 0, sizeof(request)); 871 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 872 request.hdr.code = ICM_APPROVE_DEVICE; 873 request.route_lo = sw->config.route_lo; 874 request.route_hi = sw->config.route_hi; 875 request.connection_id = sw->connection_id; 876 877 memset(&reply, 0, sizeof(reply)); 878 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 879 1, ICM_APPROVE_TIMEOUT); 880 if (ret) 881 return ret; 882 883 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 884 tb_warn(tb, "PCIe tunnel creation failed\n"); 885 return -EIO; 886 } 887 888 return 0; 889 } 890 891 static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) 892 { 893 struct icm_tr_pkg_add_device_key_response reply; 894 struct icm_tr_pkg_add_device_key request; 895 int ret; 896 897 memset(&request, 0, sizeof(request)); 898 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 899 request.hdr.code = ICM_ADD_DEVICE_KEY; 900 request.route_lo = sw->config.route_lo; 901 request.route_hi = sw->config.route_hi; 902 request.connection_id = sw->connection_id; 903 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); 904 905 memset(&reply, 0, sizeof(reply)); 906 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 907 1, ICM_TIMEOUT); 908 if (ret) 909 return ret; 910 911 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 912 tb_warn(tb, "Adding key to switch failed\n"); 913 return -EIO; 914 } 915 916 return 0; 917 } 918 919 static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, 920 const u8 *challenge, u8 *response) 921 { 922 struct icm_tr_pkg_challenge_device_response reply; 923 struct icm_tr_pkg_challenge_device request; 924 int ret; 925 926 memset(&request, 0, sizeof(request)); 927 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 928 request.hdr.code = ICM_CHALLENGE_DEVICE; 929 request.route_lo = sw->config.route_lo; 930 request.route_hi = sw->config.route_hi; 931 request.connection_id = sw->connection_id; 932 memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); 933 934 memset(&reply, 0, sizeof(reply)); 935 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 936 1, ICM_TIMEOUT); 937 if (ret) 938 return ret; 939 940 if (reply.hdr.flags & ICM_FLAGS_ERROR) 941 return -EKEYREJECTED; 942 if (reply.hdr.flags & ICM_FLAGS_NO_KEY) 943 return -ENOKEY; 944 945 memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); 946 947 return 0; 948 } 949 950 static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) 951 { 952 struct icm_tr_pkg_approve_xdomain_response reply; 953 struct icm_tr_pkg_approve_xdomain request; 954 int ret; 955 956 memset(&request, 0, sizeof(request)); 957 request.hdr.code = ICM_APPROVE_XDOMAIN; 958 request.route_hi = upper_32_bits(xd->route); 959 request.route_lo = lower_32_bits(xd->route); 960 request.transmit_path = xd->transmit_path; 961 request.transmit_ring = xd->transmit_ring; 962 request.receive_path = xd->receive_path; 963 request.receive_ring = xd->receive_ring; 964 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); 965 966 memset(&reply, 0, sizeof(reply)); 967 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 968 1, ICM_TIMEOUT); 969 if (ret) 970 return ret; 971 972 if (reply.hdr.flags & ICM_FLAGS_ERROR) 973 return -EIO; 974 975 return 0; 976 } 977 978 static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, 979 int stage) 980 { 981 struct icm_tr_pkg_disconnect_xdomain_response reply; 982 struct icm_tr_pkg_disconnect_xdomain request; 983 int ret; 984 985 memset(&request, 0, sizeof(request)); 986 request.hdr.code = ICM_DISCONNECT_XDOMAIN; 987 request.stage = stage; 988 request.route_hi = upper_32_bits(xd->route); 989 request.route_lo = lower_32_bits(xd->route); 990 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); 991 992 memset(&reply, 0, sizeof(reply)); 993 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 994 1, ICM_TIMEOUT); 995 if (ret) 996 return ret; 997 998 if (reply.hdr.flags & ICM_FLAGS_ERROR) 999 return -EIO; 1000 1001 return 0; 1002 } 1003 1004 static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) 1005 { 1006 int ret; 1007 1008 ret = icm_tr_xdomain_tear_down(tb, xd, 1); 1009 if (ret) 1010 return ret; 1011 1012 usleep_range(10, 50); 1013 return icm_tr_xdomain_tear_down(tb, xd, 2); 1014 } 1015 1016 static void 1017 icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1018 { 1019 const struct icm_tr_event_device_connected *pkg = 1020 (const struct icm_tr_event_device_connected *)hdr; 1021 enum tb_security_level security_level; 1022 struct tb_switch *sw, *parent_sw; 1023 struct tb_xdomain *xd; 1024 bool authorized, boot; 1025 u64 route; 1026 1027 /* 1028 * Currently we don't use the QoS information coming with the 1029 * device connected message so simply just ignore that extra 1030 * packet for now. 1031 */ 1032 if (pkg->hdr.packet_id) 1033 return; 1034 1035 route = get_route(pkg->route_hi, pkg->route_lo); 1036 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; 1037 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> 1038 ICM_FLAGS_SLEVEL_SHIFT; 1039 boot = pkg->link_info & ICM_LINK_INFO_BOOT; 1040 1041 if (pkg->link_info & ICM_LINK_INFO_REJECTED) { 1042 tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", 1043 route); 1044 return; 1045 } 1046 1047 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); 1048 if (sw) { 1049 /* Update the switch if it is still in the same place */ 1050 if (tb_route(sw) == route && !!sw->authorized == authorized) { 1051 parent_sw = tb_to_switch(sw->dev.parent); 1052 update_switch(parent_sw, sw, route, pkg->connection_id, 1053 0, 0, 0, boot); 1054 tb_switch_put(sw); 1055 return; 1056 } 1057 1058 remove_switch(sw); 1059 tb_switch_put(sw); 1060 } 1061 1062 /* Another switch with the same address */ 1063 sw = tb_switch_find_by_route(tb, route); 1064 if (sw) { 1065 remove_switch(sw); 1066 tb_switch_put(sw); 1067 } 1068 1069 /* XDomain connection with the same address */ 1070 xd = tb_xdomain_find_by_route(tb, route); 1071 if (xd) { 1072 remove_xdomain(xd); 1073 tb_xdomain_put(xd); 1074 } 1075 1076 parent_sw = tb_switch_find_by_route(tb, get_parent_route(route)); 1077 if (!parent_sw) { 1078 tb_err(tb, "failed to find parent switch for %llx\n", route); 1079 return; 1080 } 1081 1082 add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, 1083 sizeof(pkg->ep_name), pkg->connection_id, 1084 0, 0, 0, security_level, authorized, boot); 1085 1086 tb_switch_put(parent_sw); 1087 } 1088 1089 static void 1090 icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 1091 { 1092 const struct icm_tr_event_device_disconnected *pkg = 1093 (const struct icm_tr_event_device_disconnected *)hdr; 1094 struct tb_switch *sw; 1095 u64 route; 1096 1097 route = get_route(pkg->route_hi, pkg->route_lo); 1098 1099 sw = tb_switch_find_by_route(tb, route); 1100 if (!sw) { 1101 tb_warn(tb, "no switch exists at %llx, ignoring\n", route); 1102 return; 1103 } 1104 1105 remove_switch(sw); 1106 tb_switch_put(sw); 1107 } 1108 1109 static void 1110 icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1111 { 1112 const struct icm_tr_event_xdomain_connected *pkg = 1113 (const struct icm_tr_event_xdomain_connected *)hdr; 1114 struct tb_xdomain *xd; 1115 struct tb_switch *sw; 1116 u64 route; 1117 1118 if (!tb->root_switch) 1119 return; 1120 1121 route = get_route(pkg->local_route_hi, pkg->local_route_lo); 1122 1123 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); 1124 if (xd) { 1125 if (xd->route == route) { 1126 update_xdomain(xd, route, 0); 1127 tb_xdomain_put(xd); 1128 return; 1129 } 1130 1131 remove_xdomain(xd); 1132 tb_xdomain_put(xd); 1133 } 1134 1135 /* An existing xdomain with the same address */ 1136 xd = tb_xdomain_find_by_route(tb, route); 1137 if (xd) { 1138 remove_xdomain(xd); 1139 tb_xdomain_put(xd); 1140 } 1141 1142 /* 1143 * If the user disconnected a switch during suspend and 1144 * connected another host to the same port, remove the switch 1145 * first. 1146 */ 1147 sw = tb_switch_find_by_route(tb, route); 1148 if (sw) { 1149 remove_switch(sw); 1150 tb_switch_put(sw); 1151 } 1152 1153 sw = tb_switch_find_by_route(tb, get_parent_route(route)); 1154 if (!sw) { 1155 tb_warn(tb, "no switch exists at %llx, ignoring\n", route); 1156 return; 1157 } 1158 1159 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); 1160 tb_switch_put(sw); 1161 } 1162 1163 static void 1164 icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 1165 { 1166 const struct icm_tr_event_xdomain_disconnected *pkg = 1167 (const struct icm_tr_event_xdomain_disconnected *)hdr; 1168 struct tb_xdomain *xd; 1169 u64 route; 1170 1171 route = get_route(pkg->route_hi, pkg->route_lo); 1172 1173 xd = tb_xdomain_find_by_route(tb, route); 1174 if (xd) { 1175 remove_xdomain(xd); 1176 tb_xdomain_put(xd); 1177 } 1178 } 1179 1180 static struct pci_dev *get_upstream_port(struct pci_dev *pdev) 1181 { 1182 struct pci_dev *parent; 1183 1184 parent = pci_upstream_bridge(pdev); 1185 while (parent) { 1186 if (!pci_is_pcie(parent)) 1187 return NULL; 1188 if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) 1189 break; 1190 parent = pci_upstream_bridge(parent); 1191 } 1192 1193 if (!parent) 1194 return NULL; 1195 1196 switch (parent->device) { 1197 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 1198 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 1199 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 1200 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 1201 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 1202 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 1203 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 1204 return parent; 1205 } 1206 1207 return NULL; 1208 } 1209 1210 static bool icm_ar_is_supported(struct tb *tb) 1211 { 1212 struct pci_dev *upstream_port; 1213 struct icm *icm = tb_priv(tb); 1214 1215 /* 1216 * Starting from Alpine Ridge we can use ICM on Apple machines 1217 * as well. We just need to reset and re-enable it first. 1218 */ 1219 if (!x86_apple_machine) 1220 return true; 1221 1222 /* 1223 * Find the upstream PCIe port in case we need to do reset 1224 * through its vendor specific registers. 1225 */ 1226 upstream_port = get_upstream_port(tb->nhi->pdev); 1227 if (upstream_port) { 1228 int cap; 1229 1230 cap = pci_find_ext_capability(upstream_port, 1231 PCI_EXT_CAP_ID_VNDR); 1232 if (cap > 0) { 1233 icm->upstream_port = upstream_port; 1234 icm->vnd_cap = cap; 1235 1236 return true; 1237 } 1238 } 1239 1240 return false; 1241 } 1242 1243 static int icm_ar_get_mode(struct tb *tb) 1244 { 1245 struct tb_nhi *nhi = tb->nhi; 1246 int retries = 60; 1247 u32 val; 1248 1249 do { 1250 val = ioread32(nhi->iobase + REG_FW_STS); 1251 if (val & REG_FW_STS_NVM_AUTH_DONE) 1252 break; 1253 msleep(50); 1254 } while (--retries); 1255 1256 if (!retries) { 1257 dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n"); 1258 return -ENODEV; 1259 } 1260 1261 return nhi_mailbox_mode(nhi); 1262 } 1263 1264 static int 1265 icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, 1266 size_t *nboot_acl, bool *rpm) 1267 { 1268 struct icm_ar_pkg_driver_ready_response reply; 1269 struct icm_pkg_driver_ready request = { 1270 .hdr.code = ICM_DRIVER_READY, 1271 }; 1272 int ret; 1273 1274 memset(&reply, 0, sizeof(reply)); 1275 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1276 1, ICM_TIMEOUT); 1277 if (ret) 1278 return ret; 1279 1280 if (security_level) 1281 *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK; 1282 if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED)) 1283 *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >> 1284 ICM_AR_INFO_BOOT_ACL_SHIFT; 1285 if (rpm) 1286 *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3); 1287 1288 return 0; 1289 } 1290 1291 static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) 1292 { 1293 struct icm_ar_pkg_get_route_response reply; 1294 struct icm_ar_pkg_get_route request = { 1295 .hdr = { .code = ICM_GET_ROUTE }, 1296 .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link, 1297 }; 1298 int ret; 1299 1300 memset(&reply, 0, sizeof(reply)); 1301 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1302 1, ICM_TIMEOUT); 1303 if (ret) 1304 return ret; 1305 1306 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1307 return -EIO; 1308 1309 *route = get_route(reply.route_hi, reply.route_lo); 1310 return 0; 1311 } 1312 1313 static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) 1314 { 1315 struct icm_ar_pkg_preboot_acl_response reply; 1316 struct icm_ar_pkg_preboot_acl request = { 1317 .hdr = { .code = ICM_PREBOOT_ACL }, 1318 }; 1319 int ret, i; 1320 1321 memset(&reply, 0, sizeof(reply)); 1322 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1323 1, ICM_TIMEOUT); 1324 if (ret) 1325 return ret; 1326 1327 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1328 return -EIO; 1329 1330 for (i = 0; i < nuuids; i++) { 1331 u32 *uuid = (u32 *)&uuids[i]; 1332 1333 uuid[0] = reply.acl[i].uuid_lo; 1334 uuid[1] = reply.acl[i].uuid_hi; 1335 1336 if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) { 1337 /* Map empty entries to null UUID */ 1338 uuid[0] = 0; 1339 uuid[1] = 0; 1340 } else if (uuid[0] != 0 || uuid[1] != 0) { 1341 /* Upper two DWs are always one's */ 1342 uuid[2] = 0xffffffff; 1343 uuid[3] = 0xffffffff; 1344 } 1345 } 1346 1347 return ret; 1348 } 1349 1350 static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, 1351 size_t nuuids) 1352 { 1353 struct icm_ar_pkg_preboot_acl_response reply; 1354 struct icm_ar_pkg_preboot_acl request = { 1355 .hdr = { 1356 .code = ICM_PREBOOT_ACL, 1357 .flags = ICM_FLAGS_WRITE, 1358 }, 1359 }; 1360 int ret, i; 1361 1362 for (i = 0; i < nuuids; i++) { 1363 const u32 *uuid = (const u32 *)&uuids[i]; 1364 1365 if (uuid_is_null(&uuids[i])) { 1366 /* 1367 * Map null UUID to the empty (all one) entries 1368 * for ICM. 1369 */ 1370 request.acl[i].uuid_lo = 0xffffffff; 1371 request.acl[i].uuid_hi = 0xffffffff; 1372 } else { 1373 /* Two high DWs need to be set to all one */ 1374 if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff) 1375 return -EINVAL; 1376 1377 request.acl[i].uuid_lo = uuid[0]; 1378 request.acl[i].uuid_hi = uuid[1]; 1379 } 1380 } 1381 1382 memset(&reply, 0, sizeof(reply)); 1383 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1384 1, ICM_TIMEOUT); 1385 if (ret) 1386 return ret; 1387 1388 if (reply.hdr.flags & ICM_FLAGS_ERROR) 1389 return -EIO; 1390 1391 return 0; 1392 } 1393 1394 static void icm_handle_notification(struct work_struct *work) 1395 { 1396 struct icm_notification *n = container_of(work, typeof(*n), work); 1397 struct tb *tb = n->tb; 1398 struct icm *icm = tb_priv(tb); 1399 1400 mutex_lock(&tb->lock); 1401 1402 /* 1403 * When the domain is stopped we flush its workqueue but before 1404 * that the root switch is removed. In that case we should treat 1405 * the queued events as being canceled. 1406 */ 1407 if (tb->root_switch) { 1408 switch (n->pkg->code) { 1409 case ICM_EVENT_DEVICE_CONNECTED: 1410 icm->device_connected(tb, n->pkg); 1411 break; 1412 case ICM_EVENT_DEVICE_DISCONNECTED: 1413 icm->device_disconnected(tb, n->pkg); 1414 break; 1415 case ICM_EVENT_XDOMAIN_CONNECTED: 1416 icm->xdomain_connected(tb, n->pkg); 1417 break; 1418 case ICM_EVENT_XDOMAIN_DISCONNECTED: 1419 icm->xdomain_disconnected(tb, n->pkg); 1420 break; 1421 } 1422 } 1423 1424 mutex_unlock(&tb->lock); 1425 1426 kfree(n->pkg); 1427 kfree(n); 1428 } 1429 1430 static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, 1431 const void *buf, size_t size) 1432 { 1433 struct icm_notification *n; 1434 1435 n = kmalloc(sizeof(*n), GFP_KERNEL); 1436 if (!n) 1437 return; 1438 1439 INIT_WORK(&n->work, icm_handle_notification); 1440 n->pkg = kmemdup(buf, size, GFP_KERNEL); 1441 n->tb = tb; 1442 1443 queue_work(tb->wq, &n->work); 1444 } 1445 1446 static int 1447 __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, 1448 size_t *nboot_acl, bool *rpm) 1449 { 1450 struct icm *icm = tb_priv(tb); 1451 unsigned int retries = 50; 1452 int ret; 1453 1454 ret = icm->driver_ready(tb, security_level, nboot_acl, rpm); 1455 if (ret) { 1456 tb_err(tb, "failed to send driver ready to ICM\n"); 1457 return ret; 1458 } 1459 1460 /* 1461 * Hold on here until the switch config space is accessible so 1462 * that we can read root switch config successfully. 1463 */ 1464 do { 1465 struct tb_cfg_result res; 1466 u32 tmp; 1467 1468 res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, 1469 0, 1, 100); 1470 if (!res.err) 1471 return 0; 1472 1473 msleep(50); 1474 } while (--retries); 1475 1476 tb_err(tb, "failed to read root switch config space, giving up\n"); 1477 return -ETIMEDOUT; 1478 } 1479 1480 static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) 1481 { 1482 unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); 1483 u32 cmd; 1484 1485 do { 1486 pci_read_config_dword(icm->upstream_port, 1487 icm->vnd_cap + PCIE2CIO_CMD, &cmd); 1488 if (!(cmd & PCIE2CIO_CMD_START)) { 1489 if (cmd & PCIE2CIO_CMD_TIMEOUT) 1490 break; 1491 return 0; 1492 } 1493 1494 msleep(50); 1495 } while (time_before(jiffies, end)); 1496 1497 return -ETIMEDOUT; 1498 } 1499 1500 static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, 1501 unsigned int port, unsigned int index, u32 *data) 1502 { 1503 struct pci_dev *pdev = icm->upstream_port; 1504 int ret, vnd_cap = icm->vnd_cap; 1505 u32 cmd; 1506 1507 cmd = index; 1508 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; 1509 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; 1510 cmd |= PCIE2CIO_CMD_START; 1511 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); 1512 1513 ret = pci2cio_wait_completion(icm, 5000); 1514 if (ret) 1515 return ret; 1516 1517 pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); 1518 return 0; 1519 } 1520 1521 static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, 1522 unsigned int port, unsigned int index, u32 data) 1523 { 1524 struct pci_dev *pdev = icm->upstream_port; 1525 int vnd_cap = icm->vnd_cap; 1526 u32 cmd; 1527 1528 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); 1529 1530 cmd = index; 1531 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; 1532 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; 1533 cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; 1534 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); 1535 1536 return pci2cio_wait_completion(icm, 5000); 1537 } 1538 1539 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) 1540 { 1541 struct icm *icm = tb_priv(tb); 1542 u32 val; 1543 1544 if (!icm->upstream_port) 1545 return -ENODEV; 1546 1547 /* Put ARC to wait for CIO reset event to happen */ 1548 val = ioread32(nhi->iobase + REG_FW_STS); 1549 val |= REG_FW_STS_CIO_RESET_REQ; 1550 iowrite32(val, nhi->iobase + REG_FW_STS); 1551 1552 /* Re-start ARC */ 1553 val = ioread32(nhi->iobase + REG_FW_STS); 1554 val |= REG_FW_STS_ICM_EN_INVERT; 1555 val |= REG_FW_STS_ICM_EN_CPU; 1556 iowrite32(val, nhi->iobase + REG_FW_STS); 1557 1558 /* Trigger CIO reset now */ 1559 return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9)); 1560 } 1561 1562 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) 1563 { 1564 unsigned int retries = 10; 1565 int ret; 1566 u32 val; 1567 1568 /* Check if the ICM firmware is already running */ 1569 val = ioread32(nhi->iobase + REG_FW_STS); 1570 if (val & REG_FW_STS_ICM_EN) 1571 return 0; 1572 1573 dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n"); 1574 1575 ret = icm_firmware_reset(tb, nhi); 1576 if (ret) 1577 return ret; 1578 1579 /* Wait until the ICM firmware tells us it is up and running */ 1580 do { 1581 /* Check that the ICM firmware is running */ 1582 val = ioread32(nhi->iobase + REG_FW_STS); 1583 if (val & REG_FW_STS_NVM_AUTH_DONE) 1584 return 0; 1585 1586 msleep(300); 1587 } while (--retries); 1588 1589 return -ETIMEDOUT; 1590 } 1591 1592 static int icm_reset_phy_port(struct tb *tb, int phy_port) 1593 { 1594 struct icm *icm = tb_priv(tb); 1595 u32 state0, state1; 1596 int port0, port1; 1597 u32 val0, val1; 1598 int ret; 1599 1600 if (!icm->upstream_port) 1601 return 0; 1602 1603 if (phy_port) { 1604 port0 = 3; 1605 port1 = 4; 1606 } else { 1607 port0 = 1; 1608 port1 = 2; 1609 } 1610 1611 /* 1612 * Read link status of both null ports belonging to a single 1613 * physical port. 1614 */ 1615 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); 1616 if (ret) 1617 return ret; 1618 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); 1619 if (ret) 1620 return ret; 1621 1622 state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK; 1623 state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; 1624 state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK; 1625 state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; 1626 1627 /* If they are both up we need to reset them now */ 1628 if (state0 != TB_PORT_UP || state1 != TB_PORT_UP) 1629 return 0; 1630 1631 val0 |= PHY_PORT_CS1_LINK_DISABLE; 1632 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); 1633 if (ret) 1634 return ret; 1635 1636 val1 |= PHY_PORT_CS1_LINK_DISABLE; 1637 ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); 1638 if (ret) 1639 return ret; 1640 1641 /* Wait a bit and then re-enable both ports */ 1642 usleep_range(10, 100); 1643 1644 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); 1645 if (ret) 1646 return ret; 1647 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); 1648 if (ret) 1649 return ret; 1650 1651 val0 &= ~PHY_PORT_CS1_LINK_DISABLE; 1652 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); 1653 if (ret) 1654 return ret; 1655 1656 val1 &= ~PHY_PORT_CS1_LINK_DISABLE; 1657 return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); 1658 } 1659 1660 static int icm_firmware_init(struct tb *tb) 1661 { 1662 struct icm *icm = tb_priv(tb); 1663 struct tb_nhi *nhi = tb->nhi; 1664 int ret; 1665 1666 ret = icm_firmware_start(tb, nhi); 1667 if (ret) { 1668 dev_err(&nhi->pdev->dev, "could not start ICM firmware\n"); 1669 return ret; 1670 } 1671 1672 if (icm->get_mode) { 1673 ret = icm->get_mode(tb); 1674 1675 switch (ret) { 1676 case NHI_FW_SAFE_MODE: 1677 icm->safe_mode = true; 1678 break; 1679 1680 case NHI_FW_CM_MODE: 1681 /* Ask ICM to accept all Thunderbolt devices */ 1682 nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0); 1683 break; 1684 1685 default: 1686 if (ret < 0) 1687 return ret; 1688 1689 tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret); 1690 return -ENODEV; 1691 } 1692 } 1693 1694 /* 1695 * Reset both physical ports if there is anything connected to 1696 * them already. 1697 */ 1698 ret = icm_reset_phy_port(tb, 0); 1699 if (ret) 1700 dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n"); 1701 ret = icm_reset_phy_port(tb, 1); 1702 if (ret) 1703 dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n"); 1704 1705 return 0; 1706 } 1707 1708 static int icm_driver_ready(struct tb *tb) 1709 { 1710 struct icm *icm = tb_priv(tb); 1711 int ret; 1712 1713 ret = icm_firmware_init(tb); 1714 if (ret) 1715 return ret; 1716 1717 if (icm->safe_mode) { 1718 tb_info(tb, "Thunderbolt host controller is in safe mode.\n"); 1719 tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n"); 1720 tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n"); 1721 return 0; 1722 } 1723 1724 ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl, 1725 &icm->rpm); 1726 if (ret) 1727 return ret; 1728 1729 /* 1730 * Make sure the number of supported preboot ACL matches what we 1731 * expect or disable the whole feature. 1732 */ 1733 if (tb->nboot_acl > icm->max_boot_acl) 1734 tb->nboot_acl = 0; 1735 1736 return 0; 1737 } 1738 1739 static int icm_suspend(struct tb *tb) 1740 { 1741 struct icm *icm = tb_priv(tb); 1742 1743 if (icm->save_devices) 1744 icm->save_devices(tb); 1745 1746 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); 1747 return 0; 1748 } 1749 1750 /* 1751 * Mark all switches (except root switch) below this one unplugged. ICM 1752 * firmware will send us an updated list of switches after we have send 1753 * it driver ready command. If a switch is not in that list it will be 1754 * removed when we perform rescan. 1755 */ 1756 static void icm_unplug_children(struct tb_switch *sw) 1757 { 1758 unsigned int i; 1759 1760 if (tb_route(sw)) 1761 sw->is_unplugged = true; 1762 1763 for (i = 1; i <= sw->config.max_port_number; i++) { 1764 struct tb_port *port = &sw->ports[i]; 1765 1766 if (port->xdomain) 1767 port->xdomain->is_unplugged = true; 1768 else if (tb_port_has_remote(port)) 1769 icm_unplug_children(port->remote->sw); 1770 } 1771 } 1772 1773 static void icm_free_unplugged_children(struct tb_switch *sw) 1774 { 1775 unsigned int i; 1776 1777 for (i = 1; i <= sw->config.max_port_number; i++) { 1778 struct tb_port *port = &sw->ports[i]; 1779 1780 if (port->xdomain && port->xdomain->is_unplugged) { 1781 tb_xdomain_remove(port->xdomain); 1782 port->xdomain = NULL; 1783 } else if (tb_port_has_remote(port)) { 1784 if (port->remote->sw->is_unplugged) { 1785 tb_switch_remove(port->remote->sw); 1786 port->remote = NULL; 1787 } else { 1788 icm_free_unplugged_children(port->remote->sw); 1789 } 1790 } 1791 } 1792 } 1793 1794 static void icm_rescan_work(struct work_struct *work) 1795 { 1796 struct icm *icm = container_of(work, struct icm, rescan_work.work); 1797 struct tb *tb = icm_to_tb(icm); 1798 1799 mutex_lock(&tb->lock); 1800 if (tb->root_switch) 1801 icm_free_unplugged_children(tb->root_switch); 1802 mutex_unlock(&tb->lock); 1803 } 1804 1805 static void icm_complete(struct tb *tb) 1806 { 1807 struct icm *icm = tb_priv(tb); 1808 1809 if (tb->nhi->going_away) 1810 return; 1811 1812 icm_unplug_children(tb->root_switch); 1813 1814 /* 1815 * Now all existing children should be resumed, start events 1816 * from ICM to get updated status. 1817 */ 1818 __icm_driver_ready(tb, NULL, NULL, NULL); 1819 1820 /* 1821 * We do not get notifications of devices that have been 1822 * unplugged during suspend so schedule rescan to clean them up 1823 * if any. 1824 */ 1825 queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); 1826 } 1827 1828 static int icm_runtime_suspend(struct tb *tb) 1829 { 1830 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); 1831 return 0; 1832 } 1833 1834 static int icm_runtime_resume(struct tb *tb) 1835 { 1836 /* 1837 * We can reuse the same resume functionality than with system 1838 * suspend. 1839 */ 1840 icm_complete(tb); 1841 return 0; 1842 } 1843 1844 static int icm_start(struct tb *tb) 1845 { 1846 struct icm *icm = tb_priv(tb); 1847 int ret; 1848 1849 if (icm->safe_mode) 1850 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); 1851 else 1852 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 1853 if (IS_ERR(tb->root_switch)) 1854 return PTR_ERR(tb->root_switch); 1855 1856 /* 1857 * NVM upgrade has not been tested on Apple systems and they 1858 * don't provide images publicly either. To be on the safe side 1859 * prevent root switch NVM upgrade on Macs for now. 1860 */ 1861 tb->root_switch->no_nvm_upgrade = x86_apple_machine; 1862 tb->root_switch->rpm = icm->rpm; 1863 1864 ret = tb_switch_add(tb->root_switch); 1865 if (ret) { 1866 tb_switch_put(tb->root_switch); 1867 tb->root_switch = NULL; 1868 } 1869 1870 return ret; 1871 } 1872 1873 static void icm_stop(struct tb *tb) 1874 { 1875 struct icm *icm = tb_priv(tb); 1876 1877 cancel_delayed_work(&icm->rescan_work); 1878 tb_switch_remove(tb->root_switch); 1879 tb->root_switch = NULL; 1880 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); 1881 } 1882 1883 static int icm_disconnect_pcie_paths(struct tb *tb) 1884 { 1885 return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); 1886 } 1887 1888 /* Falcon Ridge */ 1889 static const struct tb_cm_ops icm_fr_ops = { 1890 .driver_ready = icm_driver_ready, 1891 .start = icm_start, 1892 .stop = icm_stop, 1893 .suspend = icm_suspend, 1894 .complete = icm_complete, 1895 .handle_event = icm_handle_event, 1896 .approve_switch = icm_fr_approve_switch, 1897 .add_switch_key = icm_fr_add_switch_key, 1898 .challenge_switch_key = icm_fr_challenge_switch_key, 1899 .disconnect_pcie_paths = icm_disconnect_pcie_paths, 1900 .approve_xdomain_paths = icm_fr_approve_xdomain_paths, 1901 .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, 1902 }; 1903 1904 /* Alpine Ridge */ 1905 static const struct tb_cm_ops icm_ar_ops = { 1906 .driver_ready = icm_driver_ready, 1907 .start = icm_start, 1908 .stop = icm_stop, 1909 .suspend = icm_suspend, 1910 .complete = icm_complete, 1911 .runtime_suspend = icm_runtime_suspend, 1912 .runtime_resume = icm_runtime_resume, 1913 .handle_event = icm_handle_event, 1914 .get_boot_acl = icm_ar_get_boot_acl, 1915 .set_boot_acl = icm_ar_set_boot_acl, 1916 .approve_switch = icm_fr_approve_switch, 1917 .add_switch_key = icm_fr_add_switch_key, 1918 .challenge_switch_key = icm_fr_challenge_switch_key, 1919 .disconnect_pcie_paths = icm_disconnect_pcie_paths, 1920 .approve_xdomain_paths = icm_fr_approve_xdomain_paths, 1921 .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, 1922 }; 1923 1924 /* Titan Ridge */ 1925 static const struct tb_cm_ops icm_tr_ops = { 1926 .driver_ready = icm_driver_ready, 1927 .start = icm_start, 1928 .stop = icm_stop, 1929 .suspend = icm_suspend, 1930 .complete = icm_complete, 1931 .runtime_suspend = icm_runtime_suspend, 1932 .runtime_resume = icm_runtime_resume, 1933 .handle_event = icm_handle_event, 1934 .get_boot_acl = icm_ar_get_boot_acl, 1935 .set_boot_acl = icm_ar_set_boot_acl, 1936 .approve_switch = icm_tr_approve_switch, 1937 .add_switch_key = icm_tr_add_switch_key, 1938 .challenge_switch_key = icm_tr_challenge_switch_key, 1939 .disconnect_pcie_paths = icm_disconnect_pcie_paths, 1940 .approve_xdomain_paths = icm_tr_approve_xdomain_paths, 1941 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, 1942 }; 1943 1944 struct tb *icm_probe(struct tb_nhi *nhi) 1945 { 1946 struct icm *icm; 1947 struct tb *tb; 1948 1949 tb = tb_domain_alloc(nhi, sizeof(struct icm)); 1950 if (!tb) 1951 return NULL; 1952 1953 icm = tb_priv(tb); 1954 INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); 1955 mutex_init(&icm->request_lock); 1956 1957 switch (nhi->pdev->device) { 1958 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: 1959 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: 1960 icm->is_supported = icm_fr_is_supported; 1961 icm->get_route = icm_fr_get_route; 1962 icm->save_devices = icm_fr_save_devices; 1963 icm->driver_ready = icm_fr_driver_ready; 1964 icm->device_connected = icm_fr_device_connected; 1965 icm->device_disconnected = icm_fr_device_disconnected; 1966 icm->xdomain_connected = icm_fr_xdomain_connected; 1967 icm->xdomain_disconnected = icm_fr_xdomain_disconnected; 1968 tb->cm_ops = &icm_fr_ops; 1969 break; 1970 1971 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI: 1972 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI: 1973 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI: 1974 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: 1975 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: 1976 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; 1977 icm->is_supported = icm_ar_is_supported; 1978 icm->get_mode = icm_ar_get_mode; 1979 icm->get_route = icm_ar_get_route; 1980 icm->save_devices = icm_fr_save_devices; 1981 icm->driver_ready = icm_ar_driver_ready; 1982 icm->device_connected = icm_fr_device_connected; 1983 icm->device_disconnected = icm_fr_device_disconnected; 1984 icm->xdomain_connected = icm_fr_xdomain_connected; 1985 icm->xdomain_disconnected = icm_fr_xdomain_disconnected; 1986 tb->cm_ops = &icm_ar_ops; 1987 break; 1988 1989 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: 1990 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: 1991 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; 1992 icm->is_supported = icm_ar_is_supported; 1993 icm->get_mode = icm_ar_get_mode; 1994 icm->driver_ready = icm_tr_driver_ready; 1995 icm->device_connected = icm_tr_device_connected; 1996 icm->device_disconnected = icm_tr_device_disconnected; 1997 icm->xdomain_connected = icm_tr_xdomain_connected; 1998 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 1999 tb->cm_ops = &icm_tr_ops; 2000 break; 2001 } 2002 2003 if (!icm->is_supported || !icm->is_supported(tb)) { 2004 dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n"); 2005 tb_domain_put(tb); 2006 return NULL; 2007 } 2008 2009 return tb; 2010 } 2011