/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#if HAVE_ENDIAN_H
#include <endian.h>
#endif

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define pr_fmt(x) "astlpc: " x

#include "container_of.h"
#include "crc32.h"
#include "libmctp.h"
#include "libmctp-alloc.h"
#include "libmctp-log.h"
#include "libmctp-astlpc.h"
#include "range.h"

#ifdef MCTP_HAVE_FILEIO

#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/aspeed-lpc-ctrl.h>

/* kernel interface */
static const char *kcs_path = "/dev/mctp0";
static const char *lpc_path = "/dev/aspeed-lpc-ctrl";

#endif

/* Single-byte control commands exchanged through the KCS data register */
enum mctp_astlpc_cmd {
	cmd_initialise = 0x00,
	cmd_tx_begin = 0x01,
	cmd_rx_complete = 0x02,
	/* Written to ODR purely to raise an interrupt; carries no payload */
	cmd_dummy_value = 0xff,
};

enum mctp_astlpc_buffer_state {
	/*
	 * Prior to "Channel Ready" we mark the buffers as "idle" to catch
	 * illegal accesses. In this state neither side is considered the owner
	 * of the buffer.
	 *
	 * Upon "Channel Ready", each side transitions the buffers from the
	 * initial "idle" state to the following target states:
	 *
	 * Tx buffer: "acquired"
	 * Rx buffer: "released"
	 */
	buffer_state_idle,

	/*
	 * Beyond initialisation by "Channel Ready", buffers are in the
	 * "acquired" state once:
	 *
	 * 1. We dequeue a control command transferring the buffer to our
	 *    ownership out of the KCS interface, and
	 * 2. We are yet to complete all of our required accesses to the buffer
	 *
	 * * The Tx buffer enters the "acquired" state when we dequeue the
	 *   "Rx Complete" command
	 * * The Rx buffer enters the "acquired" state when we dequeue the
	 *   "Tx Begin" command
	 *
	 * It is a failure of implementation if it's possible for both sides to
	 * simultaneously consider a buffer as "acquired".
	 */
	buffer_state_acquired,

	/*
	 * Buffers are in the "prepared" state when:
	 *
	 * 1. We have completed all of our required accesses (read or write)
	 *    for the buffer, and
	 * 2. We have not yet successfully enqueued the control command to hand
	 *    off ownership
	 */
	buffer_state_prepared,

	/*
	 * Beyond initialisation by "Channel Ready", buffers are in the
	 * "released" state once:
	 *
	 * 1. We successfully enqueue the control command transferring
	 *    ownership to the remote side in to the KCS interface
	 *
	 * * The Tx buffer enters the "released" state when we enqueue the
	 *   "Tx Begin" command
	 * * The Rx buffer enters the "released" state when we enqueue the
	 *   "Rx Complete" command
	 *
	 * It may be the case that both sides simultaneously consider a buffer
	 * to be in the "released" state. However, if this is true, it must
	 * also be true that a buffer ownership transfer command has been
	 * enqueued in the KCS interface and is yet to be dequeued.
	 */
	buffer_state_released,
};

/* One packet buffer carved out of the shared LPC window */
struct mctp_astlpc_buffer {
	uint32_t offset; /* Byte offset from the base of the LPC window */
	uint32_t size;	 /* Capacity of the buffer in bytes */
	enum mctp_astlpc_buffer_state state; /* Current ownership state */
};

/* Paired buffers, always named from the local endpoint's perspective */
struct mctp_astlpc_layout {
	struct mctp_astlpc_buffer rx;
	struct mctp_astlpc_buffer tx;
};

/* Per-protocol-version framing operations */
struct mctp_astlpc_protocol {
	uint16_t version;
	/* Medium packet size for a given MCTP body size */
	uint32_t (*packet_size)(uint32_t body);
	/* Inverse of packet_size() */
	uint32_t (*body_size)(uint32_t packet);
	/* Append any medium-specific integrity trailer before transmit */
	void (*pktbuf_protect)(struct mctp_pktbuf *pkt);
	/* Strip/verify the trailer on receive; false means corrupt */
	bool (*pktbuf_validate)(struct mctp_pktbuf *pkt);
};

struct mctp_binding_astlpc {
	struct mctp_binding binding;

	void *lpc_map; /* Direct mapping of the LPC window, may be NULL */
	struct mctp_astlpc_layout layout;

	uint8_t mode; /* MCTP_BINDING_ASTLPC_MODE_BMC or _HOST */
	uint32_t requested_mtu; /* 0 means no explicit MTU request */

	const struct mctp_astlpc_protocol *proto;

	/* direct ops data */
	struct mctp_binding_astlpc_ops ops;
	void *ops_data;

	/* fileio ops data */
	int kcs_fd;
	uint8_t kcs_status; /* Last KCS status value observed (host side) */
};

#define binding_to_astlpc(b)                                                   \
	container_of(b, struct mctp_binding_astlpc, binding)

/* Log with a "bmc"/"host" prefix derived from the binding's mode */
#define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
	do {                                                                   \
		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
			   ##__VA_ARGS__);                                     \
	} while (0)

#define astlpc_prerr(ctx, fmt, ...)                                            \
	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
#define astlpc_prwarn(ctx, fmt, ...)                                           \
	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
#define astlpc_prnotice(ctx, fmt, ...)                                         \
	astlpc_prlog(ctx, MCTP_LOG_NOTICE, fmt, ##__VA_ARGS__)
#define astlpc_prinfo(ctx, fmt, ...)                                           \
	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
#define astlpc_prdebug(ctx, fmt, ...)                                          \
\ 161 astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__) 162 163 /* clang-format off */ 164 #define ASTLPC_MCTP_MAGIC 0x4d435450 165 #define ASTLPC_VER_BAD 0 166 #define ASTLPC_VER_MIN 1 167 168 /* Support testing of new binding protocols */ 169 #ifndef ASTLPC_VER_CUR 170 #define ASTLPC_VER_CUR 3 171 #endif 172 /* clang-format on */ 173 174 #ifndef ARRAY_SIZE 175 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) 176 #endif 177 178 static uint32_t astlpc_packet_size_v1(uint32_t body) 179 { 180 assert((body + 4) > body); 181 182 return body + 4; 183 } 184 185 static uint32_t astlpc_body_size_v1(uint32_t packet) 186 { 187 assert((packet - 4) < packet); 188 189 return packet - 4; 190 } 191 192 void astlpc_pktbuf_protect_v1(struct mctp_pktbuf *pkt) 193 { 194 (void)pkt; 195 } 196 197 bool astlpc_pktbuf_validate_v1(struct mctp_pktbuf *pkt) 198 { 199 (void)pkt; 200 return true; 201 } 202 203 static uint32_t astlpc_packet_size_v3(uint32_t body) 204 { 205 assert((body + 4 + 4) > body); 206 207 return body + 4 + 4; 208 } 209 210 static uint32_t astlpc_body_size_v3(uint32_t packet) 211 { 212 assert((packet - 4 - 4) < packet); 213 214 return packet - 4 - 4; 215 } 216 217 void astlpc_pktbuf_protect_v3(struct mctp_pktbuf *pkt) 218 { 219 uint32_t code; 220 221 code = htobe32(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt))); 222 mctp_prdebug("%s: 0x%" PRIx32, __func__, code); 223 mctp_pktbuf_push(pkt, &code, 4); 224 } 225 226 bool astlpc_pktbuf_validate_v3(struct mctp_pktbuf *pkt) 227 { 228 uint32_t code; 229 void *check; 230 231 code = be32toh(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt) - 4)); 232 mctp_prdebug("%s: 0x%" PRIx32, __func__, code); 233 check = mctp_pktbuf_pop(pkt, 4); 234 return check && !memcmp(&code, check, 4); 235 } 236 237 static const struct mctp_astlpc_protocol astlpc_protocol_version[] = { 238 [0] = { 239 .version = 0, 240 .packet_size = NULL, 241 .body_size = NULL, 242 .pktbuf_protect = NULL, 243 .pktbuf_validate = NULL, 244 }, 245 [1] = { 246 
.version = 1, 247 .packet_size = astlpc_packet_size_v1, 248 .body_size = astlpc_body_size_v1, 249 .pktbuf_protect = astlpc_pktbuf_protect_v1, 250 .pktbuf_validate = astlpc_pktbuf_validate_v1, 251 }, 252 [2] = { 253 .version = 2, 254 .packet_size = astlpc_packet_size_v1, 255 .body_size = astlpc_body_size_v1, 256 .pktbuf_protect = astlpc_pktbuf_protect_v1, 257 .pktbuf_validate = astlpc_pktbuf_validate_v1, 258 }, 259 [3] = { 260 .version = 3, 261 .packet_size = astlpc_packet_size_v3, 262 .body_size = astlpc_body_size_v3, 263 .pktbuf_protect = astlpc_pktbuf_protect_v3, 264 .pktbuf_validate = astlpc_pktbuf_validate_v3, 265 }, 266 }; 267 268 struct mctp_lpcmap_hdr { 269 uint32_t magic; 270 271 uint16_t bmc_ver_min; 272 uint16_t bmc_ver_cur; 273 uint16_t host_ver_min; 274 uint16_t host_ver_cur; 275 uint16_t negotiated_ver; 276 uint16_t pad0; 277 278 struct { 279 uint32_t rx_offset; 280 uint32_t rx_size; 281 uint32_t tx_offset; 282 uint32_t tx_size; 283 } layout; 284 } __attribute__((packed)); 285 286 static const uint32_t control_size = 0x100; 287 288 #define LPC_WIN_SIZE (1 * 1024 * 1024) 289 290 #define KCS_STATUS_BMC_READY 0x80 291 #define KCS_STATUS_CHANNEL_ACTIVE 0x40 292 #define KCS_STATUS_IBF 0x02 293 #define KCS_STATUS_OBF 0x01 294 295 static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc, 296 enum mctp_binding_astlpc_kcs_reg reg, 297 uint8_t val) 298 { 299 return astlpc->ops.kcs_write(astlpc->ops_data, reg, val); 300 } 301 302 static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc, 303 enum mctp_binding_astlpc_kcs_reg reg, 304 uint8_t *val) 305 { 306 return astlpc->ops.kcs_read(astlpc->ops_data, reg, val); 307 } 308 309 static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc, 310 const void *buf, long offset, 311 size_t len) 312 { 313 astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset); 314 315 assert(offset >= 0); 316 317 /* Indirect access */ 318 if (astlpc->ops.lpc_write) { 319 void 
		void *data = astlpc->ops_data;

		return astlpc->ops.lpc_write(data, buf, offset, len);
	}

	/* Direct mapping */
	assert(astlpc->lpc_map);
	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);

	return 0;
}

/* Read `len` bytes from the LPC window, via ops callback or direct mapping */
static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
				       void *buf, long offset, size_t len)
{
	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
		       offset);

	assert(offset >= 0);

	/* Indirect access */
	if (astlpc->ops.lpc_read) {
		void *data = astlpc->ops_data;

		return astlpc->ops.lpc_read(data, buf, offset, len);
	}

	/* Direct mapping */
	assert(astlpc->lpc_map);
	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);

	return 0;
}

/* Decode and log a status value we are about to publish on the KCS STR */
static void
mctp_astlpc_kcs_print_status_write(struct mctp_binding_astlpc *astlpc,
				   uint8_t status)
{
	astlpc_prnotice(
		astlpc, "Binding state is 0x%hhx: BMC %s, Channel %s, OBF %s",
		status, status & KCS_STATUS_BMC_READY ? "active" : "inactive",
		status & KCS_STATUS_CHANNEL_ACTIVE ? "active" : "inactive",
		status & KCS_STATUS_OBF ? "preserved" : "cleared");
}

/*
 * Publish a new value in the KCS status register, then poke the data
 * register so the remote endpoint is interrupted. Returns 0 on success,
 * -1 if either KCS write fails.
 */
static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
				      uint8_t status)
{
	uint8_t data;
	int rc;

	/* Since we're setting the status register, we want the other endpoint
	 * to be interrupted. However, some hardware may only raise a host-side
	 * interrupt on an ODR event.
	 * So, write a dummy value of 0xff to ODR, which will ensure that an
	 * interrupt is triggered, and can be ignored by the host.
	 */
	data = cmd_dummy_value;

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS status write failed");
		return -1;
	}

	mctp_astlpc_kcs_print_status_write(astlpc, status);

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS dummy data write failed");
		return -1;
	}

	return 0;
}

/*
 * Read the buffer layout advertised in the control region into `layout`.
 *
 * The shared header names the buffers from the host's point of view, so when
 * we are the BMC the Rx/Tx roles are swapped on the way in.
 */
static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
				   struct mctp_astlpc_layout *layout)
{
	struct mctp_lpcmap_hdr hdr;
	int rc;

	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
	if (rc < 0)
		return rc;

	/* Flip the buffers as the names are defined in terms of the host */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
		layout->rx.offset = be32toh(hdr.layout.tx_offset);
		layout->rx.size = be32toh(hdr.layout.tx_size);
		layout->tx.offset = be32toh(hdr.layout.rx_offset);
		layout->tx.size = be32toh(hdr.layout.rx_size);
	} else {
		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

		layout->rx.offset = be32toh(hdr.layout.rx_offset);
		layout->rx.size = be32toh(hdr.layout.rx_size);
		layout->tx.offset = be32toh(hdr.layout.tx_offset);
		layout->tx.size = be32toh(hdr.layout.tx_size);
	}

	return 0;
}

/*
 * Publish our buffer layout to the control region.
 *
 * The BMC owns the layout and writes all four fields; from v2 onwards the
 * host may only update its own rx_size.
 */
static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
				    struct mctp_astlpc_layout *layout)
{
	uint32_t rx_size_be;

	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
		struct mctp_lpcmap_hdr hdr;

		/*
		 * Flip the buffers as the names are defined in terms of the
		 * host
		 */
		hdr.layout.rx_offset = htobe32(layout->tx.offset);
		hdr.layout.rx_size = htobe32(layout->tx.size);
		hdr.layout.tx_offset = htobe32(layout->rx.offset);
		hdr.layout.tx_size = htobe32(layout->rx.size);

		/* Only the layout sub-struct is written; the rest of hdr is
		 * deliberately untouched */
		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
					     offsetof(struct mctp_lpcmap_hdr,
						      layout),
					     sizeof(hdr.layout));
	}

	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

	/*
	 * As of v2 we only need to write rx_size - the offsets are controlled
	 * by the BMC, as is the BMC's rx_size (host tx_size).
	 */
	rx_size_be = htobe32(layout->rx.size);
	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
				     offsetof(struct mctp_lpcmap_hdr,
					      layout.rx_size),
				     sizeof(rx_size_be));
}

/*
 * Sanity-check a single packet buffer: no 32-bit wrap, fits in the window,
 * large enough for the baseline transmission unit, and clear of the control
 * region.
 */
static bool
mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc *astlpc,
			    const struct mctp_astlpc_buffer *buf,
			    const char *name)
{
	/* Check for overflow */
	if (buf->offset + buf->size < buf->offset) {
		mctp_prerr(
			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
			", size: %" PRIu32,
			name, buf->offset, buf->size);
		return false;
	}

	/* Check that the buffers are contained within the allocated space */
	if (buf->offset + buf->size > LPC_WIN_SIZE) {
		mctp_prerr(
			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
			", size: %" PRIu32,
			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
			buf->size);
		return false;
	}

	/* Check that the baseline transmission unit is supported */
	if (buf->size <
	    astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU))) {
		mctp_prerr(
			"%s packet buffer too small: Require %" PRIu32
			" bytes to support the %u byte baseline transmission unit, found %" PRIu32,
			name,
			astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU)),
			MCTP_BTU, buf->size);
		return false;
	}

	/* Check for overlap with the control space */
	if (buf->offset < control_size) {
		mctp_prerr(
			"%s packet buffer overlaps control region {0x%" PRIx32
			", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
			name, 0U, control_size, buf->offset, buf->size);
		return false;
	}

	return true;
}

static bool
mctp_astlpc_layout_validate(const struct mctp_binding_astlpc *astlpc, 509 const struct mctp_astlpc_layout *layout) 510 { 511 const struct mctp_astlpc_buffer *rx = &layout->rx; 512 const struct mctp_astlpc_buffer *tx = &layout->tx; 513 bool rx_valid, tx_valid; 514 515 rx_valid = mctp_astlpc_buffer_validate(astlpc, rx, "Rx"); 516 tx_valid = mctp_astlpc_buffer_validate(astlpc, tx, "Tx"); 517 518 if (!(rx_valid && tx_valid)) 519 return false; 520 521 /* Check that the buffers are disjoint */ 522 if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) || 523 (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) { 524 mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32 525 ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}", 526 rx->offset, rx->size, tx->offset, tx->size); 527 return false; 528 } 529 530 return true; 531 } 532 533 static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc) 534 { 535 struct mctp_lpcmap_hdr hdr = { 0 }; 536 uint8_t status; 537 uint32_t sz; 538 539 /* 540 * The largest buffer size is half of the allocated MCTP space 541 * excluding the control space. 542 */ 543 sz = ((LPC_WIN_SIZE - control_size) / 2); 544 545 /* 546 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17 547 * Query Hop in DSP0236 v1.3.0. 
548 */ 549 sz = MCTP_BODY_SIZE(astlpc->proto->body_size(sz)); 550 sz &= ~0xfUL; 551 sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(sz)); 552 553 if (astlpc->requested_mtu) { 554 uint32_t rpkt, rmtu; 555 556 rmtu = astlpc->requested_mtu; 557 rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu)); 558 sz = MIN(sz, rpkt); 559 } 560 561 /* Flip the buffers as the names are defined in terms of the host */ 562 astlpc->layout.tx.offset = control_size; 563 astlpc->layout.tx.size = sz; 564 astlpc->layout.rx.offset = 565 astlpc->layout.tx.offset + astlpc->layout.tx.size; 566 astlpc->layout.rx.size = sz; 567 568 if (!mctp_astlpc_layout_validate(astlpc, &astlpc->layout)) { 569 astlpc_prerr(astlpc, "Cannot support an MTU of %" PRIu32, sz); 570 return -EINVAL; 571 } 572 573 hdr = (struct mctp_lpcmap_hdr){ 574 .magic = htobe32(ASTLPC_MCTP_MAGIC), 575 .bmc_ver_min = htobe16(ASTLPC_VER_MIN), 576 .bmc_ver_cur = htobe16(ASTLPC_VER_CUR), 577 578 /* Flip the buffers back as we're now describing the host's 579 * configuration to the host */ 580 .layout.rx_offset = htobe32(astlpc->layout.tx.offset), 581 .layout.rx_size = htobe32(astlpc->layout.tx.size), 582 .layout.tx_offset = htobe32(astlpc->layout.rx.offset), 583 .layout.tx_size = htobe32(astlpc->layout.rx.size), 584 }; 585 586 mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr)); 587 588 /* 589 * Set status indicating that the BMC is now active. Be explicit about 590 * clearing OBF; we're reinitialising the binding and so any previous 591 * buffer state is irrelevant. 
592 */ 593 status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF; 594 return mctp_astlpc_kcs_set_status(astlpc, status); 595 } 596 597 static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b) 598 { 599 struct mctp_binding_astlpc *astlpc = 600 container_of(b, struct mctp_binding_astlpc, binding); 601 602 astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_CUR]; 603 604 return mctp_astlpc_init_bmc(astlpc); 605 } 606 607 static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min, 608 uint16_t bmc_ver_cur, 609 uint16_t host_ver_min, 610 uint16_t host_ver_cur) 611 { 612 if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) { 613 mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16 614 "], [%" PRIu16 ", %" PRIu16 "]", 615 bmc_ver_min, bmc_ver_cur, host_ver_min, 616 host_ver_cur); 617 return false; 618 } else if (bmc_ver_min > bmc_ver_cur) { 619 mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16 620 "]", 621 bmc_ver_min, bmc_ver_cur); 622 return false; 623 } else if (host_ver_min > host_ver_cur) { 624 mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16 625 "]", 626 host_ver_min, host_ver_cur); 627 return false; 628 } else if ((host_ver_cur < bmc_ver_min) || 629 (host_ver_min > bmc_ver_cur)) { 630 mctp_prerr( 631 "Unable to satisfy version negotiation with ranges [%" PRIu16 632 ", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]", 633 bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur); 634 return false; 635 } 636 637 return true; 638 } 639 640 static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc) 641 { 642 struct mctp_astlpc_layout layout; 643 uint32_t rmtu; 644 uint32_t sz; 645 int rc; 646 647 rc = mctp_astlpc_layout_read(astlpc, &layout); 648 if (rc < 0) 649 return rc; 650 651 if (!mctp_astlpc_layout_validate(astlpc, &layout)) { 652 astlpc_prerr( 653 astlpc, 654 "BMC provided invalid buffer layout: Rx {0x%" PRIx32 655 ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}", 656 layout.rx.offset, 
layout.rx.size, layout.tx.offset, 657 layout.tx.size); 658 return -EINVAL; 659 } 660 661 astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes", 662 astlpc->requested_mtu); 663 664 rmtu = astlpc->requested_mtu; 665 sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu)); 666 layout.rx.size = sz; 667 668 if (!mctp_astlpc_layout_validate(astlpc, &layout)) { 669 astlpc_prerr( 670 astlpc, 671 "Generated invalid buffer layout with size %" PRIu32 672 ": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32 673 ", %" PRIu32 "}", 674 sz, layout.rx.offset, layout.rx.size, layout.tx.offset, 675 layout.tx.size); 676 return -EINVAL; 677 } 678 679 astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes", 680 astlpc->requested_mtu); 681 682 return mctp_astlpc_layout_write(astlpc, &layout); 683 } 684 685 static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min, 686 uint16_t bmc_ver_cur, 687 uint16_t host_ver_min, 688 uint16_t host_ver_cur) 689 { 690 if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur, 691 host_ver_min, host_ver_cur)) 692 return ASTLPC_VER_BAD; 693 694 if (bmc_ver_cur < host_ver_cur) 695 return bmc_ver_cur; 696 697 return host_ver_cur; 698 } 699 700 static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc) 701 { 702 const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN); 703 const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR); 704 uint16_t bmc_ver_min, bmc_ver_cur, negotiated; 705 struct mctp_lpcmap_hdr hdr; 706 uint8_t status; 707 int rc; 708 709 rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status); 710 if (rc) { 711 mctp_prwarn("KCS status read failed"); 712 return rc; 713 } 714 715 astlpc->kcs_status = status; 716 717 if (!(status & KCS_STATUS_BMC_READY)) 718 return -EHOSTDOWN; 719 720 mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr)); 721 722 bmc_ver_min = be16toh(hdr.bmc_ver_min); 723 bmc_ver_cur = be16toh(hdr.bmc_ver_cur); 724 725 /* Calculate the expected value of negotiated_ver */ 726 negotiated = 
mctp_astlpc_negotiate_version( 727 bmc_ver_min, bmc_ver_cur, ASTLPC_VER_MIN, ASTLPC_VER_CUR); 728 if (!negotiated) { 729 astlpc_prerr(astlpc, "Cannot negotiate with invalid versions"); 730 return -EINVAL; 731 } 732 733 /* Assign protocol ops so we can calculate the packet buffer sizes */ 734 assert(negotiated < ARRAY_SIZE(astlpc_protocol_version)); 735 astlpc->proto = &astlpc_protocol_version[negotiated]; 736 737 /* Negotiate packet buffers in v2 style if the BMC supports it */ 738 if (negotiated >= 2) { 739 rc = mctp_astlpc_negotiate_layout_host(astlpc); 740 if (rc < 0) 741 return rc; 742 } 743 744 /* Advertise the host's supported protocol versions */ 745 mctp_astlpc_lpc_write(astlpc, &ver_min_be, 746 offsetof(struct mctp_lpcmap_hdr, host_ver_min), 747 sizeof(ver_min_be)); 748 749 mctp_astlpc_lpc_write(astlpc, &ver_cur_be, 750 offsetof(struct mctp_lpcmap_hdr, host_ver_cur), 751 sizeof(ver_cur_be)); 752 753 /* Send channel init command */ 754 rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0); 755 if (rc) { 756 astlpc_prwarn(astlpc, "KCS write failed"); 757 } 758 759 /* 760 * Configure the host so `astlpc->proto->version == 0` holds until we 761 * receive a subsequent status update from the BMC. Until then, 762 * `astlpc->proto->version == 0` indicates that we're yet to complete 763 * the channel initialisation handshake. 764 * 765 * When the BMC provides a status update with KCS_STATUS_CHANNEL_ACTIVE 766 * set we will assign the appropriate protocol ops struct in accordance 767 * with `negotiated_ver`. 
768 */ 769 astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_BAD]; 770 771 return rc; 772 } 773 774 static int mctp_binding_astlpc_start_host(struct mctp_binding *b) 775 { 776 struct mctp_binding_astlpc *astlpc = 777 container_of(b, struct mctp_binding_astlpc, binding); 778 779 return mctp_astlpc_init_host(astlpc); 780 } 781 782 static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc, 783 uint8_t status, bool is_write) 784 { 785 bool is_bmc; 786 bool ready_state; 787 uint8_t flag; 788 789 is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC); 790 flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF; 791 ready_state = is_write ? 0 : 1; 792 793 return !!(status & flag) == ready_state; 794 } 795 796 static inline bool 797 mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status) 798 { 799 return __mctp_astlpc_kcs_ready(astlpc, status, false); 800 } 801 802 static inline bool 803 mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status) 804 { 805 return __mctp_astlpc_kcs_ready(astlpc, status, true); 806 } 807 808 static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc, 809 enum mctp_astlpc_cmd data) 810 { 811 uint8_t status; 812 int rc; 813 814 rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status); 815 if (rc) { 816 astlpc_prwarn(astlpc, "KCS status read failed"); 817 return -EIO; 818 } 819 if (!mctp_astlpc_kcs_write_ready(astlpc, status)) 820 return -EBUSY; 821 822 rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data); 823 if (rc) { 824 astlpc_prwarn(astlpc, "KCS data write failed"); 825 return -EIO; 826 } 827 828 return 0; 829 } 830 831 static int mctp_binding_astlpc_tx(struct mctp_binding *b, 832 struct mctp_pktbuf *pkt) 833 { 834 struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b); 835 uint32_t len, len_be; 836 struct mctp_hdr *hdr; 837 int rc; 838 839 hdr = mctp_pktbuf_hdr(pkt); 840 len = mctp_pktbuf_size(pkt); 841 842 astlpc_prdebug(astlpc, 843 
		       "%s: Transmitting %" PRIu32
		       "-byte packet (%hhu, %hhu, 0x%hhx)",
		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);

	if (len > astlpc->proto->body_size(astlpc->layout.tx.size)) {
		astlpc_prwarn(astlpc, "invalid TX len %" PRIu32 ": %" PRIu32,
			      len,
			      astlpc->proto->body_size(astlpc->layout.tx.size));
		return -EMSGSIZE;
	}

	/* Tx is single-buffered: hold off further Tx until the remote hands
	 * the buffer back via "Rx Complete" */
	mctp_binding_set_tx_enabled(b, false);

	/* Length field precedes the payload in the Tx buffer (big-endian) */
	len_be = htobe32(len);
	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
			      sizeof(len_be));

	/* Append any medium-specific trailer (CRC32 on v3), which grows the
	 * packet, so re-read the size before copying */
	astlpc->proto->pktbuf_protect(pkt);
	len = mctp_pktbuf_size(pkt);

	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);

	astlpc->layout.tx.state = buffer_state_prepared;

	rc = mctp_astlpc_kcs_send(astlpc, cmd_tx_begin);
	if (!rc)
		astlpc->layout.tx.state = buffer_state_released;

	/* -EBUSY is not fatal: the buffer stays "prepared" and the command is
	 * re-attempted later, so report success to the core */
	return rc == -EBUSY ? 0 : rc;
}

/*
 * Derive the MTU the BMC will accept, bounded by the physical gap between
 * the buffers, any explicitly requested MTU, and the host's proposed Tx size.
 */
static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
					  struct mctp_astlpc_layout *layout)
{
	uint32_t low, high, limit, rpkt;

	/* Derive the largest MTU the BMC _can_ support */
	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
	limit = high - low;

	/* Determine the largest MTU the BMC _wants_ to support */
	if (astlpc->requested_mtu) {
		uint32_t rmtu = astlpc->requested_mtu;

		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
		limit = MIN(limit, rpkt);
	}

	/* Determine the accepted MTU, applied both directions by convention */
	rpkt = MIN(limit, layout->tx.size);
	return MCTP_BODY_SIZE(astlpc->proto->body_size(rpkt));
}

/*
 * BMC side of v2+ buffer negotiation: take the host's proposed layout,
 * compute a symmetric MTU, and write the final layout back.
 */
static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout proposed, pending;
	uint32_t sz, mtu;
	int rc;

	/* Do we have a valid protocol version? */
	if (!astlpc->proto->version)
		return -EINVAL;

	/* Extract the host's proposed layout */
	rc = mctp_astlpc_layout_read(astlpc, &proposed);
	if (rc < 0)
		return rc;

	/* Do we have a reasonable layout? */
	if (!mctp_astlpc_layout_validate(astlpc, &proposed))
		return -EINVAL;

	/* Negotiate the MTU */
	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(mtu));

	/*
	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
	 * functions
	 */
	pending = astlpc->layout;
	pending.tx.size = sz;
	pending.rx.size = sz;

	if (mctp_astlpc_layout_validate(astlpc, &pending)) {
		/* We found a sensible Rx MTU, so honour it */
		astlpc->layout = pending;

		/* Enforce the negotiated MTU */
		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
		if (rc < 0)
			return rc;

		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
			      mtu);
	} else {
		astlpc_prwarn(astlpc, "MTU negotiation failed");
		return -EINVAL;
	}

	if (astlpc->proto->version >= 2)
		astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);

	return 0;
}

/*
 * BMC reaction to the host's "Initialise" command: negotiate the version and
 * layout, publish the result, and raise "Channel Active" on success.
 */
static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
{
	uint16_t negotiated, negotiated_be;
	struct mctp_lpcmap_hdr hdr;
	uint8_t status;
	int rc;

	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));

	/* Version negotiation */
	negotiated = mctp_astlpc_negotiate_version(ASTLPC_VER_MIN,
						   ASTLPC_VER_CUR,
						   be16toh(hdr.host_ver_min),
						   be16toh(hdr.host_ver_cur));

	/* MTU negotiation requires knowing which protocol we'll use */
	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
	astlpc->proto = &astlpc_protocol_version[negotiated];

	/* Host Rx MTU negotiation: Failure terminates channel init */
	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
	if (rc < 0)
		negotiated = ASTLPC_VER_BAD;

	/* Populate the negotiated version */
	negotiated_be = htobe16(negotiated);
	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
			      sizeof(negotiated_be));

	/* Track buffer ownership */
	astlpc->layout.tx.state = buffer_state_acquired;
	astlpc->layout.rx.state = buffer_state_released;

	/* Finalise the configuration */
	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
	if (negotiated > 0) {
		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
			      negotiated);
		status |= KCS_STATUS_CHANNEL_ACTIVE;
	} else {
		astlpc_prerr(astlpc, "Failed to initialise channel");
	}

	mctp_astlpc_kcs_set_status(astlpc, status);

	/* Tx only becomes possible when negotiation succeeded */
	mctp_binding_set_tx_enabled(&astlpc->binding,
				    status & KCS_STATUS_CHANNEL_ACTIVE);
}

/*
 * Handle "Tx Begin" from the remote side: read the packet out of the shared
 * Rx buffer, hand the buffer back via "Rx Complete", then validate and
 * deliver the packet to the core.
 */
static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_pktbuf *pkt;
	struct mctp_hdr *hdr;
	uint32_t body, packet;

	/* Big-endian length field sits at the start of the Rx buffer */
	mctp_astlpc_lpc_read(astlpc, &body, astlpc->layout.rx.offset,
			     sizeof(body));
	body = be32toh(body);

	if (body > astlpc->proto->body_size(astlpc->layout.rx.size)) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
		return;
	}

	if ((size_t)body > astlpc->binding.pkt_size) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
		return;
	}

	/* Eliminate the medium-specific header that we just read */
	packet = astlpc->proto->packet_size(body) - 4;
	pkt = mctp_pktbuf_alloc(&astlpc->binding, packet);
	if (!pkt) {
		astlpc_prwarn(astlpc, "unable to allocate pktbuf len 0x%x",
			      packet);
		return;
	}

	/*
	 * Read payload and medium-specific trailer from immediately after the
	 * medium-specific header.
	 */
	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
			     astlpc->layout.rx.offset + 4, packet);

	astlpc->layout.rx.state = buffer_state_prepared;

	/* Inform the other side of the MCTP interface that we have read
	 * the packet off the bus before handling the contents of the packet.
	 */
	if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
		astlpc->layout.rx.state = buffer_state_released;

	hdr = mctp_pktbuf_hdr(pkt);
	if (hdr->ver != 1) {
		mctp_pktbuf_free(pkt);
		astlpc_prdebug(astlpc, "Dropped packet with invalid version");
		return;
	}

	/*
	 * v3 will validate the CRC32 in the medium-specific trailer and adjust
	 * the packet size accordingly. On older protocols validation is a no-op
	 * that always returns true.
	 */
	if (astlpc->proto->pktbuf_validate(pkt)) {
		mctp_bus_rx(&astlpc->binding, pkt);
	} else {
		/* TODO: Drop any associated assembly */
		mctp_pktbuf_free(pkt);
		astlpc_prdebug(astlpc, "Dropped corrupt packet");
	}
}

/* Handle "Rx Complete": the remote returned the Tx buffer, so Tx can resume */
static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
{
	astlpc->layout.tx.state = buffer_state_acquired;
	mctp_binding_set_tx_enabled(&astlpc->binding, true);
}

/*
 * Host side of channel finalisation: read back the version the BMC selected,
 * adopt the matching protocol ops and final buffer layout.
 */
static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout layout;
	uint16_t negotiated;
	int rc;

	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
				  offsetof(struct mctp_lpcmap_hdr,
					   negotiated_ver),
				  sizeof(negotiated));
	if (rc < 0)
		return rc;

	negotiated = be16toh(negotiated);
	/* NOTE(review): this logs at error level even on success — presumably
	 * leftover debugging; consider astlpc_prinfo() instead. Confirm
	 * before changing log-scraping consumers. */
	astlpc_prerr(astlpc, "Version negotiation got: %u", negotiated);

	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
	    negotiated > ASTLPC_VER_CUR) {
		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
			     negotiated);
		return -EINVAL;
	}

	assert(negotiated <
ARRAY_SIZE(astlpc_protocol_version)); 1095 astlpc->proto = &astlpc_protocol_version[negotiated]; 1096 1097 rc = mctp_astlpc_layout_read(astlpc, &layout); 1098 if (rc < 0) 1099 return rc; 1100 1101 if (!mctp_astlpc_layout_validate(astlpc, &layout)) { 1102 mctp_prerr("BMC proposed invalid buffer parameters"); 1103 return -EINVAL; 1104 } 1105 1106 astlpc->layout = layout; 1107 1108 if (negotiated >= 2) 1109 astlpc->binding.pkt_size = 1110 astlpc->proto->body_size(astlpc->layout.tx.size); 1111 1112 /* Track buffer ownership */ 1113 astlpc->layout.tx.state = buffer_state_acquired; 1114 astlpc->layout.rx.state = buffer_state_released; 1115 1116 return 0; 1117 } 1118 1119 static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc, 1120 uint8_t status) 1121 { 1122 uint8_t updated; 1123 int rc = 0; 1124 1125 assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST); 1126 1127 updated = astlpc->kcs_status ^ status; 1128 1129 astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__, 1130 status, updated); 1131 1132 if (updated & KCS_STATUS_BMC_READY) { 1133 if (status & KCS_STATUS_BMC_READY) { 1134 astlpc->kcs_status = status; 1135 return astlpc->binding.start(&astlpc->binding); 1136 } else { 1137 /* Shut down the channel */ 1138 astlpc->layout.rx.state = buffer_state_idle; 1139 astlpc->layout.tx.state = buffer_state_idle; 1140 mctp_binding_set_tx_enabled(&astlpc->binding, false); 1141 } 1142 } 1143 1144 if (astlpc->proto->version == 0 || 1145 updated & KCS_STATUS_CHANNEL_ACTIVE) { 1146 bool enable; 1147 1148 astlpc->layout.rx.state = buffer_state_idle; 1149 astlpc->layout.tx.state = buffer_state_idle; 1150 rc = mctp_astlpc_finalise_channel(astlpc); 1151 enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0; 1152 mctp_binding_set_tx_enabled(&astlpc->binding, enable); 1153 } 1154 1155 astlpc->kcs_status = status; 1156 1157 return rc; 1158 } 1159 1160 bool mctp_astlpc_tx_done(struct mctp_binding_astlpc *astlpc) 1161 { 1162 return astlpc->layout.tx.state == 
buffer_state_acquired;
}

/*
 * Main event-pump entry point, called when the KCS interface may have work
 * for us. Retries any pending buffer-ownership handoffs, then reads and
 * dispatches at most one command byte from the KCS data register.
 *
 * Returns 0 when there is nothing (more) to do, a negative value on KCS I/O
 * or channel-update failure.
 */
int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
{
	uint8_t status, data;
	int rc;

	/* Retry any handoff that previously failed to enqueue on KCS */
	if (astlpc->layout.rx.state == buffer_state_prepared)
		if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
			astlpc->layout.rx.state = buffer_state_released;

	if (astlpc->layout.tx.state == buffer_state_prepared)
		if (!mctp_astlpc_kcs_send(astlpc, cmd_tx_begin))
			astlpc->layout.tx.state = buffer_state_released;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);

	/* Nothing pending in the input buffer */
	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
		return 0;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS data read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);

	/* Before negotiation (version == 0) only init/dummy are legal */
	if (!astlpc->proto->version &&
	    !(data == cmd_initialise || data == cmd_dummy_value)) {
		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
			      data);
		return 0;
	}

	switch (data) {
	case cmd_initialise:
		mctp_astlpc_init_channel(astlpc);
		break;
	case cmd_tx_begin:
		/* The remote may only hand us an Rx buffer we released */
		if (astlpc->layout.rx.state != buffer_state_released) {
			astlpc_prerr(
				astlpc,
				"Protocol error: Invalid Rx buffer state for event %d: %d\n",
				data, astlpc->layout.rx.state);
			return 0;
		}
		mctp_astlpc_rx_start(astlpc);
		break;
	case cmd_rx_complete:
		/* The remote may only return a Tx buffer we released */
		if (astlpc->layout.tx.state != buffer_state_released) {
			astlpc_prerr(
				astlpc,
				"Protocol error: Invalid Tx buffer state for event %d: %d\n",
				data, astlpc->layout.tx.state);
			return 0;
		}
		mctp_astlpc_tx_complete(astlpc);
		break;
	case cmd_dummy_value:
		/* No responsibilities for the BMC on 0xff */
		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
			rc = mctp_astlpc_update_channel(astlpc, status);
			if (rc < 0)
				return rc;
		}
		break;
	default:
		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
	}

	/* Handle silent loss of bmc-ready */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
		if (!(status & KCS_STATUS_BMC_READY && data == cmd_dummy_value))
			return mctp_astlpc_update_channel(astlpc, status);
	}

	return rc;
}

/* allocate and basic initialisation */
static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
						      uint32_t mtu)
{
	struct mctp_binding_astlpc *astlpc;

	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));

	astlpc = __mctp_alloc(sizeof(*astlpc));
	if (!astlpc)
		return NULL;

	memset(astlpc, 0, sizeof(*astlpc));
	astlpc->mode = mode;
	astlpc->lpc_map = NULL;
	/* Buffers are idle until "Channel Ready" (see the state enum above) */
	astlpc->layout.rx.state = buffer_state_idle;
	astlpc->layout.tx.state = buffer_state_idle;
	astlpc->requested_mtu = mtu;
	astlpc->binding.name = "astlpc";
	astlpc->binding.version = 1;
	/* Clamp the packet size to at least the baseline transmission unit */
	astlpc->binding.pkt_size =
		MCTP_PACKET_SIZE(mtu > MCTP_BTU ?
mtu : MCTP_BTU); 1272 astlpc->binding.pkt_header = 4; 1273 astlpc->binding.pkt_trailer = 4; 1274 astlpc->binding.tx = mctp_binding_astlpc_tx; 1275 if (mode == MCTP_BINDING_ASTLPC_MODE_BMC) 1276 astlpc->binding.start = mctp_binding_astlpc_start_bmc; 1277 else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST) 1278 astlpc->binding.start = mctp_binding_astlpc_start_host; 1279 else { 1280 astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode); 1281 __mctp_free(astlpc); 1282 return NULL; 1283 } 1284 1285 return astlpc; 1286 } 1287 1288 struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b) 1289 { 1290 return &b->binding; 1291 } 1292 1293 struct mctp_binding_astlpc * 1294 mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map, 1295 const struct mctp_binding_astlpc_ops *ops, void *ops_data) 1296 { 1297 struct mctp_binding_astlpc *astlpc; 1298 1299 if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC || 1300 mode == MCTP_BINDING_ASTLPC_MODE_HOST)) { 1301 mctp_prerr("Unknown binding mode: %u", mode); 1302 return NULL; 1303 } 1304 1305 astlpc = __mctp_astlpc_init(mode, mtu); 1306 if (!astlpc) 1307 return NULL; 1308 1309 memcpy(&astlpc->ops, ops, sizeof(astlpc->ops)); 1310 astlpc->ops_data = ops_data; 1311 astlpc->lpc_map = lpc_map; 1312 astlpc->mode = mode; 1313 1314 return astlpc; 1315 } 1316 1317 struct mctp_binding_astlpc * 1318 mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data, 1319 void *lpc_map) 1320 { 1321 return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map, 1322 ops, ops_data); 1323 } 1324 1325 void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc) 1326 { 1327 /* Clear channel-active and bmc-ready */ 1328 if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) 1329 mctp_astlpc_kcs_set_status(astlpc, 0); 1330 __mctp_free(astlpc); 1331 } 1332 1333 #ifdef MCTP_HAVE_FILEIO 1334 1335 static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc) 1336 { 1337 struct aspeed_lpc_ctrl_mapping map = { 
1338 .window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY, 1339 .window_id = 0, /* There's only one */ 1340 .flags = 0, 1341 .addr = 0, 1342 .offset = 0, 1343 .size = 0 1344 }; 1345 void *lpc_map_base; 1346 int fd, rc; 1347 1348 fd = open(lpc_path, O_RDWR | O_SYNC); 1349 if (fd < 0) { 1350 astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path); 1351 return -1; 1352 } 1353 1354 rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map); 1355 if (rc) { 1356 astlpc_prwarn(astlpc, "LPC GET_SIZE failed"); 1357 close(fd); 1358 return -1; 1359 } 1360 1361 /* 1362 * 1363 * 1364 * Decouple ourselves from hiomapd[1] (another user of the FW2AHB) by 1365 * mapping the FW2AHB to the reserved memory here as well. 1366 * 1367 * It's not possible to use the MCTP ASTLPC binding on machines that 1368 * need the FW2AHB bridge mapped anywhere except to the reserved memory 1369 * (e.g. the host SPI NOR). 1370 * 1371 * [1] https://github.com/openbmc/hiomapd/ 1372 * 1373 * 1374 * 1375 * The following calculation must align with what's going on in 1376 * hiomapd's lpc.c so as not to disrupt its behaviour: 1377 * 1378 * https://github.com/openbmc/hiomapd/blob/5ff50e3cbd7702aefc185264e4adfb9952040575/lpc.c#L68 1379 * 1380 * 1381 */ 1382 1383 /* Map the reserved memory at the top of the 28-bit LPC firmware address space */ 1384 map.addr = 0x0FFFFFFF & -map.size; 1385 astlpc_prinfo( 1386 astlpc, 1387 "Configuring FW2AHB to map reserved memory at 0x%08x for 0x%x in the LPC FW cycle address-space", 1388 map.addr, map.size); 1389 1390 rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map); 1391 if (rc) { 1392 astlpc_prwarn(astlpc, 1393 "Failed to map FW2AHB to reserved memory"); 1394 close(fd); 1395 return -1; 1396 } 1397 1398 /* Map the reserved memory into our address space */ 1399 lpc_map_base = 1400 mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 1401 if (lpc_map_base == MAP_FAILED) { 1402 astlpc_prwarn(astlpc, "LPC mmap failed"); 1403 rc = -1; 1404 } else { 1405 astlpc->lpc_map = lpc_map_base 
+ map.size - LPC_WIN_SIZE; 1406 } 1407 1408 close(fd); 1409 1410 return rc; 1411 } 1412 1413 static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc) 1414 { 1415 astlpc->kcs_fd = open(kcs_path, O_RDWR); 1416 if (astlpc->kcs_fd < 0) 1417 return -1; 1418 1419 return 0; 1420 } 1421 1422 static int __mctp_astlpc_fileio_kcs_read(void *arg, 1423 enum mctp_binding_astlpc_kcs_reg reg, 1424 uint8_t *val) 1425 { 1426 struct mctp_binding_astlpc *astlpc = arg; 1427 off_t offset = reg; 1428 int rc; 1429 1430 rc = pread(astlpc->kcs_fd, val, 1, offset); 1431 1432 return rc == 1 ? 0 : -1; 1433 } 1434 1435 static int __mctp_astlpc_fileio_kcs_write(void *arg, 1436 enum mctp_binding_astlpc_kcs_reg reg, 1437 uint8_t val) 1438 { 1439 struct mctp_binding_astlpc *astlpc = arg; 1440 off_t offset = reg; 1441 int rc; 1442 1443 rc = pwrite(astlpc->kcs_fd, &val, 1, offset); 1444 1445 return rc == 1 ? 0 : -1; 1446 } 1447 1448 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc, 1449 struct pollfd *pollfd) 1450 { 1451 bool release; 1452 1453 pollfd->fd = astlpc->kcs_fd; 1454 pollfd->events = 0; 1455 1456 release = astlpc->layout.rx.state == buffer_state_prepared || 1457 astlpc->layout.tx.state == buffer_state_prepared; 1458 1459 pollfd->events = release ? POLLOUT : POLLIN; 1460 1461 return 0; 1462 } 1463 1464 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void) 1465 { 1466 struct mctp_binding_astlpc *astlpc; 1467 int rc; 1468 1469 /* 1470 * If we're doing file IO then we're very likely not running 1471 * freestanding, so lets assume that we're on the BMC side. 1472 * 1473 * Requesting an MTU of 0 requests the largest possible MTU, whatever 1474 * value that might take. 1475 */ 1476 astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0); 1477 if (!astlpc) 1478 return NULL; 1479 1480 /* Set internal operations for kcs. 
We use direct accesses to the lpc 1481 * map area */ 1482 astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read; 1483 astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write; 1484 astlpc->ops_data = astlpc; 1485 1486 rc = mctp_astlpc_init_fileio_lpc(astlpc); 1487 if (rc) { 1488 free(astlpc); 1489 return NULL; 1490 } 1491 1492 rc = mctp_astlpc_init_fileio_kcs(astlpc); 1493 if (rc) { 1494 free(astlpc); 1495 return NULL; 1496 } 1497 1498 return astlpc; 1499 } 1500 #else 1501 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void) 1502 { 1503 mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__); 1504 return NULL; 1505 } 1506 1507 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc __unused, 1508 struct pollfd *pollfd __unused) 1509 { 1510 mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__); 1511 return -1; 1512 } 1513 #endif 1514