1 /* 2 * Internal Thunderbolt Connection Manager. This is a firmware running on 3 * the Thunderbolt host controller performing most of the low-level 4 * handling. 5 * 6 * Copyright (C) 2017, Intel Corporation 7 * Authors: Michael Jamet <michael.jamet@intel.com> 8 * Mika Westerberg <mika.westerberg@linux.intel.com> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13 */ 14 15 #include <linux/delay.h> 16 #include <linux/mutex.h> 17 #include <linux/pci.h> 18 #include <linux/platform_data/x86/apple.h> 19 #include <linux/sizes.h> 20 #include <linux/slab.h> 21 #include <linux/workqueue.h> 22 23 #include "ctl.h" 24 #include "nhi_regs.h" 25 #include "tb.h" 26 27 #define PCIE2CIO_CMD 0x30 28 #define PCIE2CIO_CMD_TIMEOUT BIT(31) 29 #define PCIE2CIO_CMD_START BIT(30) 30 #define PCIE2CIO_CMD_WRITE BIT(21) 31 #define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19) 32 #define PCIE2CIO_CMD_CS_SHIFT 19 33 #define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13) 34 #define PCIE2CIO_CMD_PORT_SHIFT 13 35 36 #define PCIE2CIO_WRDATA 0x34 37 #define PCIE2CIO_RDDATA 0x38 38 39 #define PHY_PORT_CS1 0x37 40 #define PHY_PORT_CS1_LINK_DISABLE BIT(14) 41 #define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26) 42 #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 43 44 #define ICM_TIMEOUT 5000 /* ms */ 45 #define ICM_MAX_LINK 4 46 #define ICM_MAX_DEPTH 6 47 48 /** 49 * struct icm - Internal connection manager private data 50 * @request_lock: Makes sure only one message is send to ICM at time 51 * @rescan_work: Work used to rescan the surviving switches after resume 52 * @upstream_port: Pointer to the PCIe upstream port this host 53 * controller is connected. This is only set for systems 54 * where ICM needs to be started manually 55 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides 56 * (only set when @upstream_port is not %NULL) 57 * @safe_mode: ICM is in safe mode 58 * @is_supported: Checks if we can support ICM on this controller 59 * @get_mode: Read and return the ICM firmware mode (optional) 60 * @get_route: Find a route string for given switch 61 * @device_connected: Handle device connected ICM message 62 * @device_disconnected: Handle device disconnected ICM message 63 */ 64 struct icm { 65 struct mutex request_lock; 66 struct delayed_work rescan_work; 67 struct pci_dev *upstream_port; 68 int vnd_cap; 69 bool safe_mode; 70 bool (*is_supported)(struct tb *tb); 71 int (*get_mode)(struct tb *tb); 72 int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); 73 void (*device_connected)(struct tb *tb, 74 const struct icm_pkg_header *hdr); 75 void (*device_disconnected)(struct tb *tb, 76 const struct icm_pkg_header *hdr); 77 }; 78 79 struct icm_notification { 80 struct work_struct work; 81 struct icm_pkg_header *pkg; 82 struct tb *tb; 83 }; 84 85 static inline struct tb *icm_to_tb(struct icm *icm) 86 { 87 return ((void *)icm - sizeof(struct tb)); 88 } 89 90 static inline u8 phy_port_from_route(u64 route, u8 depth) 91 { 92 return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8)); 93 } 94 95 static inline u8 dual_link_from_link(u8 link) 96 { 97 return link ? ((link - 1) ^ 0x01) + 1 : 0; 98 } 99 100 static inline u64 get_route(u32 route_hi, u32 route_lo) 101 { 102 return (u64)route_hi << 32 | route_lo; 103 } 104 105 static bool icm_match(const struct tb_cfg_request *req, 106 const struct ctl_pkg *pkg) 107 { 108 const struct icm_pkg_header *res_hdr = pkg->buffer; 109 const struct icm_pkg_header *req_hdr = req->request; 110 111 if (pkg->frame.eof != req->response_type) 112 return false; 113 if (res_hdr->code != req_hdr->code) 114 return false; 115 116 return true; 117 } 118 119 static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) 120 { 121 const struct icm_pkg_header *hdr = pkg->buffer; 122 123 if (hdr->packet_id < req->npackets) { 124 size_t offset = hdr->packet_id * req->response_size; 125 126 memcpy(req->response + offset, pkg->buffer, req->response_size); 127 } 128 129 return hdr->packet_id == hdr->total_packets - 1; 130 } 131 132 static int icm_request(struct tb *tb, const void *request, size_t request_size, 133 void *response, size_t response_size, size_t npackets, 134 unsigned int timeout_msec) 135 { 136 struct icm *icm = tb_priv(tb); 137 int retries = 3; 138 139 do { 140 struct tb_cfg_request *req; 141 struct tb_cfg_result res; 142 143 req = tb_cfg_request_alloc(); 144 if (!req) 145 return -ENOMEM; 146 147 req->match = icm_match; 148 req->copy = icm_copy; 149 req->request = request; 150 req->request_size = request_size; 151 req->request_type = TB_CFG_PKG_ICM_CMD; 152 req->response = response; 153 req->npackets = npackets; 154 req->response_size = response_size; 155 req->response_type = TB_CFG_PKG_ICM_RESP; 156 157 mutex_lock(&icm->request_lock); 158 res = tb_cfg_request_sync(tb->ctl, req, timeout_msec); 159 mutex_unlock(&icm->request_lock); 160 161 tb_cfg_request_put(req); 162 163 if (res.err != -ETIMEDOUT) 164 return res.err == 1 ? -EIO : res.err; 165 166 usleep_range(20, 50); 167 } while (retries--); 168 169 return -ETIMEDOUT; 170 } 171 172 static bool icm_fr_is_supported(struct tb *tb) 173 { 174 return !x86_apple_machine; 175 } 176 177 static inline int icm_fr_get_switch_index(u32 port) 178 { 179 int index; 180 181 if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT) 182 return 0; 183 184 index = port >> ICM_PORT_INDEX_SHIFT; 185 return index != 0xff ? index : 0; 186 } 187 188 static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) 189 { 190 struct icm_fr_pkg_get_topology_response *switches, *sw; 191 struct icm_fr_pkg_get_topology request = { 192 .hdr = { .code = ICM_GET_TOPOLOGY }, 193 }; 194 size_t npackets = ICM_GET_TOPOLOGY_PACKETS; 195 int ret, index; 196 u8 i; 197 198 switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL); 199 if (!switches) 200 return -ENOMEM; 201 202 ret = icm_request(tb, &request, sizeof(request), switches, 203 sizeof(*switches), npackets, ICM_TIMEOUT); 204 if (ret) 205 goto err_free; 206 207 sw = &switches[0]; 208 index = icm_fr_get_switch_index(sw->ports[link]); 209 if (!index) { 210 ret = -ENODEV; 211 goto err_free; 212 } 213 214 sw = &switches[index]; 215 for (i = 1; i < depth; i++) { 216 unsigned int j; 217 218 if (!(sw->first_data & ICM_SWITCH_USED)) { 219 ret = -ENODEV; 220 goto err_free; 221 } 222 223 for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { 224 index = icm_fr_get_switch_index(sw->ports[j]); 225 if (index > sw->switch_index) { 226 sw = &switches[index]; 227 break; 228 } 229 } 230 } 231 232 *route = get_route(sw->route_hi, sw->route_lo); 233 234 err_free: 235 kfree(switches); 236 return ret; 237 } 238 239 static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) 240 { 241 struct icm_fr_pkg_approve_device request; 242 struct icm_fr_pkg_approve_device reply; 243 int ret; 244 245 memset(&request, 0, sizeof(request)); 246 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 247 request.hdr.code = ICM_APPROVE_DEVICE; 248 request.connection_id = sw->connection_id; 249 request.connection_key = sw->connection_key; 250 251 memset(&reply, 0, sizeof(reply)); 252 /* Use larger timeout as establishing tunnels can take some time */ 253 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 254 1, 10000); 255 if (ret) 256 return ret; 257 258 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 259 tb_warn(tb, "PCIe tunnel creation failed\n"); 260 return -EIO; 261 } 262 263 return 0; 264 } 265 266 static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) 267 { 268 struct icm_fr_pkg_add_device_key request; 269 struct icm_fr_pkg_add_device_key_response reply; 270 int ret; 271 272 memset(&request, 0, sizeof(request)); 273 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 274 request.hdr.code = ICM_ADD_DEVICE_KEY; 275 request.connection_id = sw->connection_id; 276 request.connection_key = sw->connection_key; 277 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); 278 279 memset(&reply, 0, sizeof(reply)); 280 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 281 1, ICM_TIMEOUT); 282 if (ret) 283 return ret; 284 285 if (reply.hdr.flags & ICM_FLAGS_ERROR) { 286 tb_warn(tb, "Adding key to switch failed\n"); 287 return -EIO; 288 } 289 290 return 0; 291 } 292 293 static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, 294 const u8 *challenge, u8 *response) 295 { 296 struct icm_fr_pkg_challenge_device request; 297 struct icm_fr_pkg_challenge_device_response reply; 298 int ret; 299 300 memset(&request, 0, sizeof(request)); 301 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); 302 request.hdr.code = ICM_CHALLENGE_DEVICE; 303 request.connection_id = sw->connection_id; 304 request.connection_key = sw->connection_key; 305 memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); 306 307 memset(&reply, 0, sizeof(reply)); 308 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 309 1, ICM_TIMEOUT); 310 if (ret) 311 return ret; 312 313 if (reply.hdr.flags & ICM_FLAGS_ERROR) 314 return -EKEYREJECTED; 315 if (reply.hdr.flags & ICM_FLAGS_NO_KEY) 316 return -ENOKEY; 317 318 memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); 319 320 return 0; 321 } 322 323 static void remove_switch(struct tb_switch *sw) 324 { 325 struct tb_switch *parent_sw; 326 327 parent_sw = tb_to_switch(sw->dev.parent); 328 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 329 tb_switch_remove(sw); 330 } 331 332 static void 333 icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 334 { 335 const struct icm_fr_event_device_connected *pkg = 336 (const struct icm_fr_event_device_connected *)hdr; 337 struct tb_switch *sw, *parent_sw; 338 struct icm *icm = tb_priv(tb); 339 bool authorized = false; 340 u8 link, depth; 341 u64 route; 342 int ret; 343 344 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 345 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 346 ICM_LINK_INFO_DEPTH_SHIFT; 347 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; 348 349 ret = icm->get_route(tb, link, depth, &route); 350 if (ret) { 351 tb_err(tb, "failed to find route string for switch at %u.%u\n", 352 link, depth); 353 return; 354 } 355 356 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); 357 if (sw) { 358 u8 phy_port, sw_phy_port; 359 360 parent_sw = tb_to_switch(sw->dev.parent); 361 sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth); 362 phy_port = phy_port_from_route(route, depth); 363 364 /* 365 * On resume ICM will send us connected events for the 366 * devices that still are present. However, that 367 * information might have changed for example by the 368 * fact that a switch on a dual-link connection might 369 * have been enumerated using the other link now. Make 370 * sure our book keeping matches that. 371 */ 372 if (sw->depth == depth && sw_phy_port == phy_port && 373 !!sw->authorized == authorized) { 374 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 375 tb_port_at(route, parent_sw)->remote = 376 tb_upstream_port(sw); 377 sw->config.route_hi = upper_32_bits(route); 378 sw->config.route_lo = lower_32_bits(route); 379 sw->connection_id = pkg->connection_id; 380 sw->connection_key = pkg->connection_key; 381 sw->link = link; 382 sw->depth = depth; 383 sw->is_unplugged = false; 384 tb_switch_put(sw); 385 return; 386 } 387 388 /* 389 * User connected the same switch to another physical 390 * port or to another part of the topology. Remove the 391 * existing switch now before adding the new one. 392 */ 393 remove_switch(sw); 394 tb_switch_put(sw); 395 } 396 397 /* 398 * If the switch was not found by UUID, look for a switch on 399 * same physical port (taking possible link aggregation into 400 * account) and depth. If we found one it is definitely a stale 401 * one so remove it first. 402 */ 403 sw = tb_switch_find_by_link_depth(tb, link, depth); 404 if (!sw) { 405 u8 dual_link; 406 407 dual_link = dual_link_from_link(link); 408 if (dual_link) 409 sw = tb_switch_find_by_link_depth(tb, dual_link, depth); 410 } 411 if (sw) { 412 remove_switch(sw); 413 tb_switch_put(sw); 414 } 415 416 parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1); 417 if (!parent_sw) { 418 tb_err(tb, "failed to find parent switch for %u.%u\n", 419 link, depth); 420 return; 421 } 422 423 sw = tb_switch_alloc(tb, &parent_sw->dev, route); 424 if (!sw) { 425 tb_switch_put(parent_sw); 426 return; 427 } 428 429 sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL); 430 sw->connection_id = pkg->connection_id; 431 sw->connection_key = pkg->connection_key; 432 sw->link = link; 433 sw->depth = depth; 434 sw->authorized = authorized; 435 sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> 436 ICM_FLAGS_SLEVEL_SHIFT; 437 438 /* Link the two switches now */ 439 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 440 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); 441 442 ret = tb_switch_add(sw); 443 if (ret) { 444 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 445 tb_switch_put(sw); 446 } 447 tb_switch_put(parent_sw); 448 } 449 450 static void 451 icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 452 { 453 const struct icm_fr_event_device_disconnected *pkg = 454 (const struct icm_fr_event_device_disconnected *)hdr; 455 struct tb_switch *sw; 456 u8 link, depth; 457 458 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 459 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 460 ICM_LINK_INFO_DEPTH_SHIFT; 461 462 if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) { 463 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); 464 return; 465 } 466 467 sw = tb_switch_find_by_link_depth(tb, link, depth); 468 if (!sw) { 469 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, 470 depth); 471 return; 472 } 473 474 remove_switch(sw); 475 tb_switch_put(sw); 476 } 477 478 static struct pci_dev *get_upstream_port(struct pci_dev *pdev) 479 { 480 struct pci_dev *parent; 481 482 parent = pci_upstream_bridge(pdev); 483 while (parent) { 484 if (!pci_is_pcie(parent)) 485 return NULL; 486 if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) 487 break; 488 parent = pci_upstream_bridge(parent); 489 } 490 491 if (!parent) 492 return NULL; 493 494 switch (parent->device) { 495 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 496 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 497 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 498 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 499 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 500 return parent; 501 } 502 503 return NULL; 504 } 505 506 static bool icm_ar_is_supported(struct tb *tb) 507 { 508 struct pci_dev *upstream_port; 509 struct icm *icm = tb_priv(tb); 510 511 /* 512 * Starting from Alpine Ridge we can use ICM on Apple machines 513 * as well. We just need to reset and re-enable it first. 514 */ 515 if (!x86_apple_machine) 516 return true; 517 518 /* 519 * Find the upstream PCIe port in case we need to do reset 520 * through its vendor specific registers. 521 */ 522 upstream_port = get_upstream_port(tb->nhi->pdev); 523 if (upstream_port) { 524 int cap; 525 526 cap = pci_find_ext_capability(upstream_port, 527 PCI_EXT_CAP_ID_VNDR); 528 if (cap > 0) { 529 icm->upstream_port = upstream_port; 530 icm->vnd_cap = cap; 531 532 return true; 533 } 534 } 535 536 return false; 537 } 538 539 static int icm_ar_get_mode(struct tb *tb) 540 { 541 struct tb_nhi *nhi = tb->nhi; 542 int retries = 5; 543 u32 val; 544 545 do { 546 val = ioread32(nhi->iobase + REG_FW_STS); 547 if (val & REG_FW_STS_NVM_AUTH_DONE) 548 break; 549 msleep(30); 550 } while (--retries); 551 552 if (!retries) { 553 dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n"); 554 return -ENODEV; 555 } 556 557 return nhi_mailbox_mode(nhi); 558 } 559 560 static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) 561 { 562 struct icm_ar_pkg_get_route_response reply; 563 struct icm_ar_pkg_get_route request = { 564 .hdr = { .code = ICM_GET_ROUTE }, 565 .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link, 566 }; 567 int ret; 568 569 memset(&reply, 0, sizeof(reply)); 570 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 571 1, ICM_TIMEOUT); 572 if (ret) 573 return ret; 574 575 if (reply.hdr.flags & ICM_FLAGS_ERROR) 576 return -EIO; 577 578 *route = get_route(reply.route_hi, reply.route_lo); 579 return 0; 580 } 581 582 static void icm_handle_notification(struct work_struct *work) 583 { 584 struct icm_notification *n = container_of(work, typeof(*n), work); 585 struct tb *tb = n->tb; 586 struct icm *icm = tb_priv(tb); 587 588 mutex_lock(&tb->lock); 589 590 switch (n->pkg->code) { 591 case ICM_EVENT_DEVICE_CONNECTED: 592 icm->device_connected(tb, n->pkg); 593 break; 594 case ICM_EVENT_DEVICE_DISCONNECTED: 595 icm->device_disconnected(tb, n->pkg); 596 break; 597 } 598 599 mutex_unlock(&tb->lock); 600 601 kfree(n->pkg); 602 kfree(n); 603 } 604 605 static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, 606 const void *buf, size_t size) 607 { 608 struct icm_notification *n; 609 610 n = kmalloc(sizeof(*n), GFP_KERNEL); 611 if (!n) 612 return; 613 614 INIT_WORK(&n->work, icm_handle_notification); 615 n->pkg = kmemdup(buf, size, GFP_KERNEL); 616 n->tb = tb; 617 618 queue_work(tb->wq, &n->work); 619 } 620 621 static int 622 __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level) 623 { 624 struct icm_pkg_driver_ready_response reply; 625 struct icm_pkg_driver_ready request = { 626 .hdr.code = ICM_DRIVER_READY, 627 }; 628 unsigned int retries = 10; 629 int ret; 630 631 memset(&reply, 0, sizeof(reply)); 632 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 633 1, ICM_TIMEOUT); 634 if (ret) 635 return ret; 636 637 if (security_level) 638 *security_level = reply.security_level & 0xf; 639 640 /* 641 * Hold on here until the switch config space is accessible so 642 * that we can read root switch config successfully. 643 */ 644 do { 645 struct tb_cfg_result res; 646 u32 tmp; 647 648 res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, 649 0, 1, 100); 650 if (!res.err) 651 return 0; 652 653 msleep(50); 654 } while (--retries); 655 656 return -ETIMEDOUT; 657 } 658 659 static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) 660 { 661 unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); 662 u32 cmd; 663 664 do { 665 pci_read_config_dword(icm->upstream_port, 666 icm->vnd_cap + PCIE2CIO_CMD, &cmd); 667 if (!(cmd & PCIE2CIO_CMD_START)) { 668 if (cmd & PCIE2CIO_CMD_TIMEOUT) 669 break; 670 return 0; 671 } 672 673 msleep(50); 674 } while (time_before(jiffies, end)); 675 676 return -ETIMEDOUT; 677 } 678 679 static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, 680 unsigned int port, unsigned int index, u32 *data) 681 { 682 struct pci_dev *pdev = icm->upstream_port; 683 int ret, vnd_cap = icm->vnd_cap; 684 u32 cmd; 685 686 cmd = index; 687 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; 688 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; 689 cmd |= PCIE2CIO_CMD_START; 690 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); 691 692 ret = pci2cio_wait_completion(icm, 5000); 693 if (ret) 694 return ret; 695 696 pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); 697 return 0; 698 } 699 700 static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, 701 unsigned int port, unsigned int index, u32 data) 702 { 703 struct pci_dev *pdev = icm->upstream_port; 704 int vnd_cap = icm->vnd_cap; 705 u32 cmd; 706 707 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); 708 709 cmd = index; 710 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; 711 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; 712 cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; 713 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); 714 715 return pci2cio_wait_completion(icm, 5000); 716 } 717 718 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) 719 { 720 struct icm *icm = tb_priv(tb); 721 u32 val; 722 723 /* Put ARC to wait for CIO reset event to happen */ 724 val = ioread32(nhi->iobase + REG_FW_STS); 725 val |= REG_FW_STS_CIO_RESET_REQ; 726 iowrite32(val, nhi->iobase + REG_FW_STS); 727 728 /* Re-start ARC */ 729 val = ioread32(nhi->iobase + REG_FW_STS); 730 val |= REG_FW_STS_ICM_EN_INVERT; 731 val |= REG_FW_STS_ICM_EN_CPU; 732 iowrite32(val, nhi->iobase + REG_FW_STS); 733 734 /* Trigger CIO reset now */ 735 return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9)); 736 } 737 738 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) 739 { 740 unsigned int retries = 10; 741 int ret; 742 u32 val; 743 744 /* Check if the ICM firmware is already running */ 745 val = ioread32(nhi->iobase + REG_FW_STS); 746 if (val & REG_FW_STS_ICM_EN) 747 return 0; 748 749 dev_info(&nhi->pdev->dev, "starting ICM firmware\n"); 750 751 ret = icm_firmware_reset(tb, nhi); 752 if (ret) 753 return ret; 754 755 /* Wait until the ICM firmware tells us it is up and running */ 756 do { 757 /* Check that the ICM firmware is running */ 758 val = ioread32(nhi->iobase + REG_FW_STS); 759 if (val & REG_FW_STS_NVM_AUTH_DONE) 760 return 0; 761 762 msleep(300); 763 } while (--retries); 764 765 return -ETIMEDOUT; 766 } 767 768 static int icm_reset_phy_port(struct tb *tb, int phy_port) 769 { 770 struct icm *icm = tb_priv(tb); 771 u32 state0, state1; 772 int port0, port1; 773 u32 val0, val1; 774 int ret; 775 776 if (!icm->upstream_port) 777 return 0; 778 779 if (phy_port) { 780 port0 = 3; 781 port1 = 4; 782 } else { 783 port0 = 1; 784 port1 = 2; 785 } 786 787 /* 788 * Read link status of both null ports belonging to a single 789 * physical port. 790 */ 791 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); 792 if (ret) 793 return ret; 794 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); 795 if (ret) 796 return ret; 797 798 state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK; 799 state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; 800 state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK; 801 state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; 802 803 /* If they are both up we need to reset them now */ 804 if (state0 != TB_PORT_UP || state1 != TB_PORT_UP) 805 return 0; 806 807 val0 |= PHY_PORT_CS1_LINK_DISABLE; 808 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); 809 if (ret) 810 return ret; 811 812 val1 |= PHY_PORT_CS1_LINK_DISABLE; 813 ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); 814 if (ret) 815 return ret; 816 817 /* Wait a bit and then re-enable both ports */ 818 usleep_range(10, 100); 819 820 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); 821 if (ret) 822 return ret; 823 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); 824 if (ret) 825 return ret; 826 827 val0 &= ~PHY_PORT_CS1_LINK_DISABLE; 828 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); 829 if (ret) 830 return ret; 831 832 val1 &= ~PHY_PORT_CS1_LINK_DISABLE; 833 return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); 834 } 835 836 static int icm_firmware_init(struct tb *tb) 837 { 838 struct icm *icm = tb_priv(tb); 839 struct tb_nhi *nhi = tb->nhi; 840 int ret; 841 842 ret = icm_firmware_start(tb, nhi); 843 if (ret) { 844 dev_err(&nhi->pdev->dev, "could not start ICM firmware\n"); 845 return ret; 846 } 847 848 if (icm->get_mode) { 849 ret = icm->get_mode(tb); 850 851 switch (ret) { 852 case NHI_FW_SAFE_MODE: 853 icm->safe_mode = true; 854 break; 855 856 case NHI_FW_CM_MODE: 857 /* Ask ICM to accept all Thunderbolt devices */ 858 nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0); 859 break; 860 861 default: 862 tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret); 863 return -ENODEV; 864 } 865 } 866 867 /* 868 * Reset both physical ports if there is anything connected to 869 * them already. 870 */ 871 ret = icm_reset_phy_port(tb, 0); 872 if (ret) 873 dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n"); 874 ret = icm_reset_phy_port(tb, 1); 875 if (ret) 876 dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n"); 877 878 return 0; 879 } 880 881 static int icm_driver_ready(struct tb *tb) 882 { 883 struct icm *icm = tb_priv(tb); 884 int ret; 885 886 ret = icm_firmware_init(tb); 887 if (ret) 888 return ret; 889 890 if (icm->safe_mode) { 891 tb_info(tb, "Thunderbolt host controller is in safe mode.\n"); 892 tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n"); 893 tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n"); 894 return 0; 895 } 896 897 return __icm_driver_ready(tb, &tb->security_level); 898 } 899 900 static int icm_suspend(struct tb *tb) 901 { 902 int ret; 903 904 ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); 905 if (ret) 906 tb_info(tb, "Ignoring mailbox command error (%d) in %s\n", 907 ret, __func__); 908 909 return 0; 910 } 911 912 /* 913 * Mark all switches (except root switch) below this one unplugged. ICM 914 * firmware will send us an updated list of switches after we have send 915 * it driver ready command. If a switch is not in that list it will be 916 * removed when we perform rescan. 917 */ 918 static void icm_unplug_children(struct tb_switch *sw) 919 { 920 unsigned int i; 921 922 if (tb_route(sw)) 923 sw->is_unplugged = true; 924 925 for (i = 1; i <= sw->config.max_port_number; i++) { 926 struct tb_port *port = &sw->ports[i]; 927 928 if (tb_is_upstream_port(port)) 929 continue; 930 if (!port->remote) 931 continue; 932 933 icm_unplug_children(port->remote->sw); 934 } 935 } 936 937 static void icm_free_unplugged_children(struct tb_switch *sw) 938 { 939 unsigned int i; 940 941 for (i = 1; i <= sw->config.max_port_number; i++) { 942 struct tb_port *port = &sw->ports[i]; 943 944 if (tb_is_upstream_port(port)) 945 continue; 946 if (!port->remote) 947 continue; 948 949 if (port->remote->sw->is_unplugged) { 950 tb_switch_remove(port->remote->sw); 951 port->remote = NULL; 952 } else { 953 icm_free_unplugged_children(port->remote->sw); 954 } 955 } 956 } 957 958 static void icm_rescan_work(struct work_struct *work) 959 { 960 struct icm *icm = container_of(work, struct icm, rescan_work.work); 961 struct tb *tb = icm_to_tb(icm); 962 963 mutex_lock(&tb->lock); 964 if (tb->root_switch) 965 icm_free_unplugged_children(tb->root_switch); 966 mutex_unlock(&tb->lock); 967 } 968 969 static void icm_complete(struct tb *tb) 970 { 971 struct icm *icm = tb_priv(tb); 972 973 if (tb->nhi->going_away) 974 return; 975 976 icm_unplug_children(tb->root_switch); 977 978 /* 979 * Now all existing children should be resumed, start events 980 * from ICM to get updated status. 981 */ 982 __icm_driver_ready(tb, NULL); 983 984 /* 985 * We do not get notifications of devices that have been 986 * unplugged during suspend so schedule rescan to clean them up 987 * if any. 988 */ 989 queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); 990 } 991 992 static int icm_start(struct tb *tb) 993 { 994 struct icm *icm = tb_priv(tb); 995 int ret; 996 997 if (icm->safe_mode) 998 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); 999 else 1000 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 1001 if (!tb->root_switch) 1002 return -ENODEV; 1003 1004 /* 1005 * NVM upgrade has not been tested on Apple systems and they 1006 * don't provide images publicly either. To be on the safe side 1007 * prevent root switch NVM upgrade on Macs for now. 1008 */ 1009 tb->root_switch->no_nvm_upgrade = x86_apple_machine; 1010 1011 ret = tb_switch_add(tb->root_switch); 1012 if (ret) 1013 tb_switch_put(tb->root_switch); 1014 1015 return ret; 1016 } 1017 1018 static void icm_stop(struct tb *tb) 1019 { 1020 struct icm *icm = tb_priv(tb); 1021 1022 cancel_delayed_work(&icm->rescan_work); 1023 tb_switch_remove(tb->root_switch); 1024 tb->root_switch = NULL; 1025 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); 1026 } 1027 1028 static int icm_disconnect_pcie_paths(struct tb *tb) 1029 { 1030 return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); 1031 } 1032 1033 /* Falcon Ridge and Alpine Ridge */ 1034 static const struct tb_cm_ops icm_fr_ops = { 1035 .driver_ready = icm_driver_ready, 1036 .start = icm_start, 1037 .stop = icm_stop, 1038 .suspend = icm_suspend, 1039 .complete = icm_complete, 1040 .handle_event = icm_handle_event, 1041 .approve_switch = icm_fr_approve_switch, 1042 .add_switch_key = icm_fr_add_switch_key, 1043 .challenge_switch_key = icm_fr_challenge_switch_key, 1044 .disconnect_pcie_paths = icm_disconnect_pcie_paths, 1045 }; 1046 1047 struct tb *icm_probe(struct tb_nhi *nhi) 1048 { 1049 struct icm *icm; 1050 struct tb *tb; 1051 1052 tb = tb_domain_alloc(nhi, sizeof(struct icm)); 1053 if (!tb) 1054 return NULL; 1055 1056 icm = tb_priv(tb); 1057 INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); 1058 mutex_init(&icm->request_lock); 1059 1060 switch (nhi->pdev->device) { 1061 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: 1062 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: 1063 icm->is_supported = icm_fr_is_supported; 1064 icm->get_route = icm_fr_get_route; 1065 icm->device_connected = icm_fr_device_connected; 1066 icm->device_disconnected = icm_fr_device_disconnected; 1067 tb->cm_ops = &icm_fr_ops; 1068 break; 1069 1070 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI: 1071 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI: 1072 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI: 1073 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: 1074 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: 1075 icm->is_supported = icm_ar_is_supported; 1076 icm->get_mode = icm_ar_get_mode; 1077 icm->get_route = icm_ar_get_route; 1078 icm->device_connected = icm_fr_device_connected; 1079 icm->device_disconnected = icm_fr_device_disconnected; 1080 tb->cm_ops = &icm_fr_ops; 1081 break; 1082 } 1083 1084 if (!icm->is_supported || !icm->is_supported(tb)) { 1085 dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n"); 1086 tb_domain_put(tb); 1087 return NULL; 1088 } 1089 1090 return tb; 1091 } 1092