/*
 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>

#include "ctl.h"


struct ctl_pkg {
	struct tb_ctl *ctl;
	void *buffer;
	struct ring_frame frame;
};

#define TB_CTL_RX_PKG_COUNT 10

/**
 * struct tb_ctl - thunderbolt control channel
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16);
	struct completion response_ready;

	hotplug_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)


/* configuration packets definitions */

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
};

/* common header */
struct tb_cfg_header {
	u32 route_hi:22;
	u32 unknown:10; /* highest order bit is set on replies */
	u32 route_lo;
} __packed;

/* additional header for read/write packets */
struct tb_cfg_address {
	u32 offset:13; /* in dwords */
	u32 length:6; /* in dwords */
	u32 port:6;
	enum tb_cfg_space space:2;
	u32 seq:2; /* sequence number */
	u32 zero:3;
} __packed;

/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
struct cfg_read_pkg {
	struct tb_cfg_header header;
	struct tb_cfg_address addr;
} __packed;

/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
struct cfg_write_pkg {
	struct tb_cfg_header header;
	struct tb_cfg_address addr;
	u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
} __packed;

/* TB_CFG_PKG_ERROR */
struct cfg_error_pkg {
	struct tb_cfg_header header;
	enum tb_cfg_error error:4;
	u32 zero1:4;
	u32 port:6;
	u32 zero2:2; /* Both should be zero, still they are different fields. */
	u32 zero3:16;
} __packed;

/* TB_CFG_PKG_EVENT */
struct cfg_event_pkg {
	struct tb_cfg_header header;
	u32 port:6;
	u32 zero:25;
	bool unplug:1;
} __packed;

/* TB_CFG_PKG_RESET */
struct cfg_reset_pkg {
	struct tb_cfg_header header;
} __packed;

/* TB_CFG_PKG_PREPARE_TO_SLEEP */
struct cfg_pts_pkg {
	struct tb_cfg_header header;
	u32 data;
} __packed;


/* utility functions */

static u64 get_route(struct tb_cfg_header header)
{
	return (u64) header.route_hi << 32 | header.route_lo;
}

static struct tb_cfg_header make_header(u64 route)
{
	struct tb_cfg_header header = {
		.route_hi = route >> 32,
		.route_lo = route,
	};
	/* check for overflow, route_hi is not 32 bits! */
	WARN_ON(get_route(header) != route);
	return header;
}
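
/*
 * For illustration: a route occupies at most 54 bits (22 bits of route_hi
 * plus 32 bits of route_lo), so a value such as 0x200000003 round-trips
 * cleanly:
 *
 *	struct tb_cfg_header h = make_header(0x200000003);
 *	// h.route_hi == 0x2, h.route_lo == 0x3
 *	// get_route(h) == 0x200000003
 *
 * Anything wider than 54 bits is truncated by the bitfield and trips the
 * WARN_ON above.
 */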

static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
			u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != get_route(*header),
			"wrong route (expected %llx, got %llx)\n",
			route, get_route(*header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };
	res.response_route = get_route(pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       get_route(pkg->header));
	if (res.err)
		return res;

	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = get_route(*header);
	res.err = check_header(pkg, len, type, route);
	return res;
}
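
/*
 * For illustration, a TB_CFG_PKG_ERROR reply reporting
 * TB_CFG_ERROR_PORT_NOT_CONNECTED for port 3 of the switch at route 0x1
 * decodes to:
 *
 *	res.err            == 1
 *	res.tb_error       == TB_CFG_ERROR_PORT_NOT_CONNECTED
 *	res.response_route == 0x1
 *	res.response_port  == 3
 *
 * Callers can thereby distinguish an error reply from the switch
 * (err == 1) from a local transport failure (negative errno).
 */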

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_WARN(ctl,
			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}

static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
{
	size_t i;
	for (i = 0; i < len; i++)
		dst[i] = cpu_to_be32(src[i]);
}

static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
{
	size_t i;
	for (i = 0; i < len; i++)
		dst[i] = be32_to_cpu(src[i]);
}

static __be32 tb_crc(void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}
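
/*
 * Wire format sketch: every control frame is the packet payload in
 * big-endian dword order followed by a four byte CRC32C checksum over the
 * payload:
 *
 *	| payload (len bytes, len % 4 == 0) | tb_crc(payload, len) |
 *
 * tb_ctl_tx() below appends the checksum before transmission and
 * tb_ctl_rx_callback() verifies and strips it on reception.
 */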
335 */ 336 static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len, 337 enum tb_cfg_pkg_type type) 338 { 339 int res; 340 struct ctl_pkg *pkg; 341 if (len % 4 != 0) { /* required for le->be conversion */ 342 tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len); 343 return -EINVAL; 344 } 345 if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */ 346 tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n", 347 len, TB_FRAME_SIZE - 4); 348 return -EINVAL; 349 } 350 pkg = tb_ctl_pkg_alloc(ctl); 351 if (!pkg) 352 return -ENOMEM; 353 pkg->frame.callback = tb_ctl_tx_callback; 354 pkg->frame.size = len + 4; 355 pkg->frame.sof = type; 356 pkg->frame.eof = type; 357 cpu_to_be32_array(pkg->buffer, data, len / 4); 358 *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len); 359 360 res = ring_tx(ctl->tx, &pkg->frame); 361 if (res) /* ring is stopped */ 362 tb_ctl_pkg_free(pkg); 363 return res; 364 } 365 366 /** 367 * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback 368 */ 369 static void tb_ctl_handle_plug_event(struct tb_ctl *ctl, 370 struct ctl_pkg *response) 371 { 372 struct cfg_event_pkg *pkg = response->buffer; 373 u64 route = get_route(pkg->header); 374 375 if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) { 376 tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n"); 377 return; 378 } 379 380 if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT)) 381 tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n", 382 route, pkg->port); 383 WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero); 384 ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug); 385 } 386 387 static void tb_ctl_rx_submit(struct ctl_pkg *pkg) 388 { 389 ring_rx(pkg->ctl->rx, &pkg->frame); /* 390 * We ignore failures during stop. 391 * All rx packets are referenced 392 * from ctl->rx_packets, so we do 393 * not loose them. 394 */ 395 } 396 397 static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, 398 bool canceled) 399 { 400 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); 401 402 if (canceled) 403 return; /* 404 * ring is stopped, packet is referenced from 405 * ctl->rx_packets. 
406 */ 407 408 if (frame->size < 4 || frame->size % 4 != 0) { 409 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", 410 frame->size); 411 goto rx; 412 } 413 414 frame->size -= 4; /* remove checksum */ 415 if (*(__be32 *) (pkg->buffer + frame->size) 416 != tb_crc(pkg->buffer, frame->size)) { 417 tb_ctl_err(pkg->ctl, 418 "RX: checksum mismatch, dropping packet\n"); 419 goto rx; 420 } 421 be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4); 422 423 if (frame->eof == TB_CFG_PKG_EVENT) { 424 tb_ctl_handle_plug_event(pkg->ctl, pkg); 425 goto rx; 426 } 427 if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) { 428 tb_ctl_err(pkg->ctl, "RX: fifo is full\n"); 429 goto rx; 430 } 431 complete(&pkg->ctl->response_ready); 432 return; 433 rx: 434 tb_ctl_rx_submit(pkg); 435 } 436 437 /** 438 * tb_ctl_rx() - receive a packet from the control channel 439 */ 440 static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer, 441 size_t length, int timeout_msec, 442 u64 route, enum tb_cfg_pkg_type type) 443 { 444 struct tb_cfg_result res; 445 struct ctl_pkg *pkg; 446 447 if (!wait_for_completion_timeout(&ctl->response_ready, 448 msecs_to_jiffies(timeout_msec))) { 449 tb_ctl_WARN(ctl, "RX: timeout\n"); 450 return (struct tb_cfg_result) { .err = -ETIMEDOUT }; 451 } 452 if (!kfifo_get(&ctl->response_fifo, &pkg)) { 453 tb_ctl_WARN(ctl, "empty kfifo\n"); 454 return (struct tb_cfg_result) { .err = -EIO }; 455 } 456 457 res = parse_header(pkg, length, type, route); 458 if (!res.err) 459 memcpy(buffer, pkg->buffer, length); 460 tb_ctl_rx_submit(pkg); 461 return res; 462 } 463 464 465 /* public interface, alloc/start/stop/free */ 466 467 /** 468 * tb_ctl_alloc() - allocate a control channel 469 * 470 * cb will be invoked once for every hot plug event. 471 * 472 * Return: Returns a pointer on success or NULL on failure. 473 */ 474 struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data) 475 { 476 int i; 477 struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); 478 if (!ctl) 479 return NULL; 480 ctl->nhi = nhi; 481 ctl->callback = cb; 482 ctl->callback_data = cb_data; 483 484 init_completion(&ctl->response_ready); 485 INIT_KFIFO(ctl->response_fifo); 486 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev, 487 TB_FRAME_SIZE, 4, 0); 488 if (!ctl->frame_pool) 489 goto err; 490 491 ctl->tx = ring_alloc_tx(nhi, 0, 10); 492 if (!ctl->tx) 493 goto err; 494 495 ctl->rx = ring_alloc_rx(nhi, 0, 10); 496 if (!ctl->rx) 497 goto err; 498 499 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) { 500 ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl); 501 if (!ctl->rx_packets[i]) 502 goto err; 503 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback; 504 } 505 506 tb_ctl_info(ctl, "control channel created\n"); 507 return ctl; 508 err: 509 tb_ctl_free(ctl); 510 return NULL; 511 } 512 513 /** 514 * tb_ctl_free() - free a control channel 515 * 516 * Must be called after tb_ctl_stop. 517 * 518 * Must NOT be called from ctl->callback. 
519 */ 520 void tb_ctl_free(struct tb_ctl *ctl) 521 { 522 int i; 523 if (ctl->rx) 524 ring_free(ctl->rx); 525 if (ctl->tx) 526 ring_free(ctl->tx); 527 528 /* free RX packets */ 529 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) 530 tb_ctl_pkg_free(ctl->rx_packets[i]); 531 532 533 if (ctl->frame_pool) 534 dma_pool_destroy(ctl->frame_pool); 535 kfree(ctl); 536 } 537 538 /** 539 * tb_cfg_start() - start/resume the control channel 540 */ 541 void tb_ctl_start(struct tb_ctl *ctl) 542 { 543 int i; 544 tb_ctl_info(ctl, "control channel starting...\n"); 545 ring_start(ctl->tx); /* is used to ack hotplug packets, start first */ 546 ring_start(ctl->rx); 547 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) 548 tb_ctl_rx_submit(ctl->rx_packets[i]); 549 } 550 551 /** 552 * control() - pause the control channel 553 * 554 * All invocations of ctl->callback will have finished after this method 555 * returns. 556 * 557 * Must NOT be called from ctl->callback. 558 */ 559 void tb_ctl_stop(struct tb_ctl *ctl) 560 { 561 ring_stop(ctl->rx); 562 ring_stop(ctl->tx); 563 564 if (!kfifo_is_empty(&ctl->response_fifo)) 565 tb_ctl_WARN(ctl, "dangling response in response_fifo\n"); 566 kfifo_reset(&ctl->response_fifo); 567 tb_ctl_info(ctl, "control channel stopped\n"); 568 } 569 570 /* public interface, commands */ 571 572 /** 573 * tb_cfg_error() - send error packet 574 * 575 * Return: Returns 0 on success or an error code on failure. 576 */ 577 int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port, 578 enum tb_cfg_error error) 579 { 580 struct cfg_error_pkg pkg = { 581 .header = make_header(route), 582 .port = port, 583 .error = error, 584 }; 585 tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port); 586 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); 587 } 588 589 /** 590 * tb_cfg_reset() - send a reset packet and wait for a response 591 * 592 * If the switch at route is incorrectly configured then we will not receive a 593 * reply (even though the switch will reset). The caller should check for 594 * -ETIMEDOUT and attempt to reconfigure the switch. 595 */ 596 struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, 597 int timeout_msec) 598 { 599 int err; 600 struct cfg_reset_pkg request = { .header = make_header(route) }; 601 struct tb_cfg_header reply; 602 603 err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET); 604 if (err) 605 return (struct tb_cfg_result) { .err = err }; 606 607 return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route, 608 TB_CFG_PKG_RESET); 609 } 610 611 /** 612 * tb_cfg_read() - read from config space into buffer 613 * 614 * Offset and length are in dwords. 
615 */ 616 struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, 617 u64 route, u32 port, enum tb_cfg_space space, 618 u32 offset, u32 length, int timeout_msec) 619 { 620 struct tb_cfg_result res = { 0 }; 621 struct cfg_read_pkg request = { 622 .header = make_header(route), 623 .addr = { 624 .port = port, 625 .space = space, 626 .offset = offset, 627 .length = length, 628 }, 629 }; 630 struct cfg_write_pkg reply; 631 632 res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ); 633 if (res.err) 634 return res; 635 636 res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route, 637 TB_CFG_PKG_READ); 638 if (res.err) 639 return res; 640 641 res.response_port = reply.addr.port; 642 res.err = check_config_address(reply.addr, space, offset, length); 643 if (!res.err) 644 memcpy(buffer, &reply.data, 4 * length); 645 return res; 646 } 647 648 /** 649 * tb_cfg_write() - write from buffer into config space 650 * 651 * Offset and length are in dwords. 652 */ 653 struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer, 654 u64 route, u32 port, enum tb_cfg_space space, 655 u32 offset, u32 length, int timeout_msec) 656 { 657 struct tb_cfg_result res = { 0 }; 658 struct cfg_write_pkg request = { 659 .header = make_header(route), 660 .addr = { 661 .port = port, 662 .space = space, 663 .offset = offset, 664 .length = length, 665 }, 666 }; 667 struct cfg_read_pkg reply; 668 669 memcpy(&request.data, buffer, length * 4); 670 671 res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE); 672 if (res.err) 673 return res; 674 675 res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route, 676 TB_CFG_PKG_WRITE); 677 if (res.err) 678 return res; 679 680 res.response_port = reply.addr.port; 681 res.err = check_config_address(reply.addr, space, offset, length); 682 return res; 683 } 684 685 int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, 686 enum tb_cfg_space space, u32 offset, u32 length) 687 { 688 struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, 689 space, offset, length, TB_CFG_DEFAULT_TIMEOUT); 690 if (res.err == 1) { 691 tb_cfg_print_error(ctl, &res); 692 return -EIO; 693 } 694 WARN(res.err, "tb_cfg_read: %d\n", res.err); 695 return res.err; 696 } 697 698 int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, 699 enum tb_cfg_space space, u32 offset, u32 length) 700 { 701 struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, 702 space, offset, length, TB_CFG_DEFAULT_TIMEOUT); 703 if (res.err == 1) { 704 tb_cfg_print_error(ctl, &res); 705 return -EIO; 706 } 707 WARN(res.err, "tb_cfg_write: %d\n", res.err); 708 return res.err; 709 } 710 711 /** 712 * tb_cfg_get_upstream_port() - get upstream port number of switch at route 713 * 714 * Reads the first dword from the switches TB_CFG_SWITCH config area and 715 * returns the port number from which the reply originated. 716 * 717 * Return: Returns the upstream port number on success or an error code on 718 * failure. 719 */ 720 int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route) 721 { 722 u32 dummy; 723 struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0, 724 TB_CFG_SWITCH, 0, 1, 725 TB_CFG_DEFAULT_TIMEOUT); 726 if (res.err == 1) 727 return -EIO; 728 if (res.err) 729 return res.err; 730 return res.response_port; 731 } 732