// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_UUID_RETRIES			10
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
#define XDOMAIN_BONDING_WAIT			100  /* ms */

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if (res_hdr->xd_hdr.route_lo != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);

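/*
 * Example (illustrative sketch only, not part of this driver): a service
 * driver could exchange a protocol-specific message with the remote host
 * like this. The my_req/my_res structures and MY_TIMEOUT_MS are
 * hypothetical; a real protocol defines its own packet layout, which must
 * begin with a struct tb_xdp_header carrying the protocol UUID, because
 * tb_xdomain_match() pairs responses with requests by the route and UUID
 * found in that header.
 *
 *	struct my_req req = { ... };
 *	struct my_res res = { };
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, MY_TIMEOUT_MS);
 *	if (ret)
 *		return ret;
 */
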
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.hdr);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * On the first round allocate a block that has enough
		 * space for the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

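/*
 * Worked example of the length/sequence encoding used above (sizes are
 * illustrative): if a request packet is 32 bytes in total and the basic
 * XDomain header (hdr->xd_hdr) is 12 bytes, tb_xdp_fill_header() stores
 * (32 - 12) / 4 = 5 dwords in the length field. With sequence number 2
 * the final value is:
 *
 *	length_sn = 5 | ((2 << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK);
 *
 * The receive side reverses this: the payload length in dwords is
 * length_sn & TB_XDOMAIN_LENGTH_MASK, and the sequence number is
 * (length_sn & TB_XDOMAIN_SN_MASK) >> TB_XDOMAIN_SN_SHIFT.
 */
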
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, for which we might add
	 * support later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a package with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

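/*
 * Example (illustrative sketch only): a service driver that speaks its
 * own UUID-identified protocol could hook into incoming XDomain packets
 * like this. my_proto_uuid and my_proto_cb are hypothetical names; see
 * tb.h for the tb_protocol_handler definition.
 *
 *	static int my_proto_cb(const void *buf, size_t size, void *data)
 *	{
 *		// Inspect the packet; a positive return value tells
 *		// tb_xdomain_handle_request() the packet was consumed.
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_proto_cb,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 *	...
 *	tb_unregister_protocol_handler(&my_handler);
 */
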
static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static void finalize_property_block(void)
{
	const struct tb_property *nodename;

	/*
	 * On first XDomain connection we set up the system nodename.
	 * This is delayed here because userspace may not have it set
	 * when the driver is first probed.
	 */
	mutex_lock(&xdomain_lock);
	nodename = tb_property_find(xdomain_property_dir, "deviceid",
				    TB_PROPERTY_TYPE_TEXT);
	if (!nodename) {
		tb_property_add_text(xdomain_property_dir, "deviceid",
				     utsname()->nodename);
		rebuild_property_block();
	}
	mutex_unlock(&xdomain_lock);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	finalize_property_block();

	tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_route_locked(tb, route);
		if (xd) {
			if (device_is_registered(&xd->dev)) {
				queue_delayed_work(tb->wq, &xd->get_properties_work,
						   msecs_to_jiffies(50));
			}
			tb_xdomain_put(xd);
		}

		break;
	}

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	default:
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

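/*
 * Example (illustrative sketch only): a minimal service driver. The
 * "network" key and the my_* names are hypothetical here; see
 * include/linux/thunderbolt.h for the real tb_service_driver and
 * tb_service_id definitions.
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
 *				 TBSVC_MATCH_PROTOCOL_ID,
 *		  .protocol_key = "network", .protocol_id = 1 },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 *
 * The matching modalias for such a service would look like
 * "tbsvc:knetworkp00000001v*r*" (see get_modalias() below).
 */
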
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static void tb_xdomain_get_uuid(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_uuid_work.work);
	struct tb *tb = xd->tb;
	uuid_t uuid;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
	if (ret < 0) {
		if (xd->uuid_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
					   msecs_to_jiffies(100));
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid))
		dev_dbg(&xd->dev, "intra-domain loop detected\n");

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return;
	}

	/* Now we can start the normal properties exchange */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen)
		goto err_free_block;

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	dev_dbg(&xd->dev, "sending properties changed notification\n");

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to send properties changed notification, retrying\n");
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		} else {
			dev_err(&xd->dev,
				"failed to send properties changed notification\n");
		}
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
}

static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%u\n", xd->link_width);
}

static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	if (xd->needs_uuid) {
		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
				   msecs_to_jiffies(100));
	} else {
		/* Start exchanging properties with the other host */
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(100));
		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
				   msecs_to_jiffies(1000));
	}
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = 0;
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_uuid_work);
	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	start_handshake(tb_to_xdomain(dev));
	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to the
 *	    other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
	if (remote_uuid)
		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);

	/*
	 * This keeps the DMA powered on as long as we have an active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

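/*
 * Typical lifecycle from a connection manager's point of view (sketch
 * only; icm.c and tb.c are the real callers). The sw and route variables
 * are placeholders for the parent switch and the route to the other host:
 *
 *	xd = tb_xdomain_alloc(tb, &sw->dev, route,
 *			      tb->root_switch->uuid, NULL);
 *	if (xd)
 *		tb_xdomain_add(xd);	// starts the discovery handshake
 *	...
 *	tb_xdomain_remove(xd);		// see tb_xdomain_remove() below
 */
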
/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or
 * not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev)) {
		put_device(&xd->dev);
	} else {
		dev_info(&xd->dev, "host disconnected\n");
		device_unregister(&xd->dev);
	}
}

/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret < 0)
		return ret;
	if (!ret)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);

/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (port->dual_link_port) {
		tb_port_lane_bonding_disable(port);
		tb_port_disable(port->dual_link_port);
		tb_xdomain_update_link_attributes(xd);

		dev_dbg(&xd->dev, "lane bonding disabled\n");
	}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);

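/*
 * Usage sketch (assumption: failure to bond is treated as non-fatal by
 * the caller, which simply keeps running on a single lane): a service
 * driver that wants a wider (bonded) link could try this before setting
 * up its DMA paths:
 *
 *	if (tb_xdomain_lane_bonding_enable(xd))
 *		dev_dbg(&xd->dev, "lane bonding not available\n");
 */
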
/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using the high-speed
 * DMA path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

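/*
 * Example (sketch, with hypothetical HopID constants and ring variables):
 * after both sides have agreed on the HopIDs and allocated their NHI
 * rings, a service driver enables the paths and later tears them down.
 * Note that per the kernel-doc above, @transmit_ring is the ring used to
 * receive packets from the other end, hence ring_rx below:
 *
 *	ret = tb_xdomain_enable_paths(xd, MY_TRANSMIT_PATH, ring_rx->hop,
 *				      MY_RECEIVE_PATH, ring_tx->hop);
 *	if (ret)
 *		goto err_free_rings;
 *	...
 *	tb_xdomain_disable_paths(xd);
 */
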
struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

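/*
 * Lookup example (sketch): because these helpers take a new reference,
 * every successful call must be balanced with tb_xdomain_put(). The
 * caller must hold @tb->lock, or use a *_locked variant from tb.h such
 * as tb_xdomain_find_by_route_locked() used earlier in this file:
 *
 *	xd = tb_xdomain_find_by_route(tb, route);
 *	if (xd) {
 *		... use xd ...
 *		tb_xdomain_put(xd);
 *	}
 */
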
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host available properties. The other connected hosts are
 * notified so they can re-read properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);

/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

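/*
 * Example (sketch): a service driver advertises itself to remote hosts
 * by publishing a property directory. The "network" key is hypothetical
 * and must be at most 8 characters (enforced above); the prtc* values
 * are protocol-specific:
 *
 *	struct tb_property_dir *dir = tb_property_create_dir(NULL);
 *
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	tb_property_add_immediate(dir, "prtcrevs", 1);
 *	tb_property_add_immediate(dir, "prtcstns", 0);
 *
 *	ret = tb_register_property_dir("network", dir);
 */
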
int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 *
	 * We also add the node name later when the first connection is
	 * made.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	return 0;
}

void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}