/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#if HAVE_ENDIAN_H
#include <endian.h>
#endif

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define pr_fmt(x) "astlpc: " x

#include "libmctp.h"
#include "libmctp-alloc.h"
#include "libmctp-log.h"
#include "libmctp-astlpc.h"
#include "container_of.h"

#ifdef MCTP_HAVE_FILEIO

#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/aspeed-lpc-ctrl.h>

/* kernel interface */
static const char *kcs_path = "/dev/mctp0";
static const char *lpc_path = "/dev/aspeed-lpc-ctrl";

#endif

struct mctp_astlpc_buffer {
	uint32_t offset;
	uint32_t size;
};

struct mctp_astlpc_layout {
	struct mctp_astlpc_buffer rx;
	struct mctp_astlpc_buffer tx;
};

struct mctp_binding_astlpc {
	struct mctp_binding binding;

	void *lpc_map;
	struct mctp_astlpc_layout layout;

	uint8_t mode;
	uint16_t version;
	uint32_t requested_mtu;

	/* direct ops data */
	struct mctp_binding_astlpc_ops ops;
	void *ops_data;

	/* fileio ops data */
	int kcs_fd;
	uint8_t kcs_status;

	bool running;
};

#define binding_to_astlpc(b)                                                   \
	container_of(b, struct mctp_binding_astlpc, binding)

#define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
	do {                                                                   \
		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
			   ##__VA_ARGS__);                                     \
	} while (0)

#define astlpc_prerr(ctx, fmt, ...)                                            \
	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
#define astlpc_prwarn(ctx, fmt, ...)                                           \
	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
#define astlpc_prinfo(ctx, fmt, ...)                                           \
	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
#define astlpc_prdebug(ctx, fmt, ...)                                          \
	astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)

/* clang-format off */
#define ASTLPC_MCTP_MAGIC	0x4d435450
#define ASTLPC_VER_BAD	0
#define ASTLPC_VER_MIN	1

/* Support testing of new binding protocols */
#ifndef ASTLPC_VER_CUR
#define ASTLPC_VER_CUR	2
#endif

#define ASTLPC_PACKET_SIZE(sz)	(4 + (sz))
#define ASTLPC_BODY_SIZE(sz)	((sz) - 4)
/* clang-format on */

struct mctp_lpcmap_hdr {
	uint32_t magic;

	uint16_t bmc_ver_min;
	uint16_t bmc_ver_cur;
	uint16_t host_ver_min;
	uint16_t host_ver_cur;
	uint16_t negotiated_ver;
	uint16_t pad0;

	struct {
		uint32_t rx_offset;
		uint32_t rx_size;
		uint32_t tx_offset;
		uint32_t tx_size;
	} layout;
} __attribute__((packed));

static const uint32_t control_size = 0x100;

#define LPC_WIN_SIZE (1 * 1024 * 1024)

#define KCS_STATUS_BMC_READY		0x80
#define KCS_STATUS_CHANNEL_ACTIVE	0x40
#define KCS_STATUS_IBF			0x02
#define KCS_STATUS_OBF			0x01

#define MIN(a, b)                                                              \
	({                                                                     \
		typeof(a) _a = a;                                              \
		typeof(b) _b = b;                                              \
		_a < _b ? _a : _b;                                             \
	})

#define MAX(a, b)                                                              \
	({                                                                     \
		typeof(a) _a = a;                                              \
		typeof(b) _b = b;                                              \
		_a > _b ? _a : _b;                                             \
	})

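/*
 * Rough sketch of the shared LPC window as used by this file. The packet
 * buffer placement shown is what mctp_astlpc_init_bmc() proposes by default;
 * the final placement and sizes are whatever the two sides negotiate:
 *
 *	0x000			control region (struct mctp_lpcmap_hdr)
 *	0x100 (control_size)	first packet buffer
 *	...			second packet buffer
 *
 * Each packet buffer carries a 4-byte big-endian length followed by the MCTP
 * packet itself, which is what ASTLPC_PACKET_SIZE()/ASTLPC_BODY_SIZE()
 * account for.
 */
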
static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
					enum mctp_binding_astlpc_kcs_reg reg,
					uint8_t val)
{
	return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
}

static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
				       enum mctp_binding_astlpc_kcs_reg reg,
				       uint8_t *val)
{
	return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
}

static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
					const void *buf, long offset,
					size_t len)
{
	astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);

	assert(offset >= 0);

	/* Indirect access */
	if (astlpc->ops.lpc_write) {
		void *data = astlpc->ops_data;

		return astlpc->ops.lpc_write(data, buf, offset, len);
	}

	/* Direct mapping */
	assert(astlpc->lpc_map);
	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);

	return 0;
}

static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
				       void *buf, long offset, size_t len)
{
	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
		       offset);

	assert(offset >= 0);

	/* Indirect access */
	if (astlpc->ops.lpc_read) {
		void *data = astlpc->ops_data;

		return astlpc->ops.lpc_read(data, buf, offset, len);
	}

	/* Direct mapping */
	assert(astlpc->lpc_map);
	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);

	return 0;
}

static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
				      uint8_t status)
{
	uint8_t data;
	int rc;

	/* Since we're setting the status register, we want the other endpoint
	 * to be interrupted. However, some hardware may only raise a host-side
	 * interrupt on an ODR event.
	 * So, write a dummy value of 0xff to ODR, which will ensure that an
	 * interrupt is triggered, and can be ignored by the host.
	 */
	data = 0xff;
	status |= KCS_STATUS_OBF;

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS status write failed");
		return -1;
	}

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS dummy data write failed");
		return -1;
	}

	return 0;
}

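/*
 * The rx/tx names in struct mctp_lpcmap_hdr are defined from the host's point
 * of view, so the layout helpers below swap their meaning when running as the
 * BMC:
 *
 *		hdr.layout.rx_*		hdr.layout.tx_*
 *	host	local Rx buffer		local Tx buffer
 *	BMC	local Tx buffer		local Rx buffer
 */
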
static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
				   struct mctp_astlpc_layout *layout)
{
	struct mctp_lpcmap_hdr hdr;
	int rc;

	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
	if (rc < 0)
		return rc;

	/* Flip the buffers as the names are defined in terms of the host */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
		layout->rx.offset = be32toh(hdr.layout.tx_offset);
		layout->rx.size = be32toh(hdr.layout.tx_size);
		layout->tx.offset = be32toh(hdr.layout.rx_offset);
		layout->tx.size = be32toh(hdr.layout.rx_size);
	} else {
		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

		layout->rx.offset = be32toh(hdr.layout.rx_offset);
		layout->rx.size = be32toh(hdr.layout.rx_size);
		layout->tx.offset = be32toh(hdr.layout.tx_offset);
		layout->tx.size = be32toh(hdr.layout.tx_size);
	}

	return 0;
}

static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
				    struct mctp_astlpc_layout *layout)
{
	uint32_t rx_size_be;

	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
		struct mctp_lpcmap_hdr hdr;

		/*
		 * Flip the buffers as the names are defined in terms of the
		 * host
		 */
		hdr.layout.rx_offset = htobe32(layout->tx.offset);
		hdr.layout.rx_size = htobe32(layout->tx.size);
		hdr.layout.tx_offset = htobe32(layout->rx.offset);
		hdr.layout.tx_size = htobe32(layout->rx.size);

		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
				offsetof(struct mctp_lpcmap_hdr, layout),
				sizeof(hdr.layout));
	}

	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

	/*
	 * As of v2 we only need to write rx_size - the offsets are controlled
	 * by the BMC, as is the BMC's rx_size (host tx_size).
	 */
	rx_size_be = htobe32(layout->rx.size);
	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
			offsetof(struct mctp_lpcmap_hdr, layout.rx_size),
			sizeof(rx_size_be));
}

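/*
 * Buffer and layout sanity checks: a proposed packet buffer is rejected if its
 * extent overflows, if it spills outside the LPC window, if it cannot hold a
 * baseline-transmission-unit packet, or if it overlaps the control region.
 * The layout check additionally requires the Rx and Tx buffers to be disjoint.
 */
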
static bool mctp_astlpc_buffer_validate(struct mctp_astlpc_buffer *buf,
					const char *name)
{
	/* Check for overflow */
	if (buf->offset + buf->size < buf->offset) {
		mctp_prerr(
			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
			", size: %" PRIu32,
			name, buf->offset, buf->size);
		return false;
	}

	/* Check that the buffers are contained within the allocated space */
	if (buf->offset + buf->size > LPC_WIN_SIZE) {
		mctp_prerr(
			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
			", size: %" PRIu32,
			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
			buf->size);
		return false;
	}

	/* Check that the baseline transmission unit is supported */
	if (buf->size < ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(MCTP_BTU))) {
		mctp_prerr(
			"%s packet buffer too small: Require %zu bytes to support the %u byte baseline transmission unit, found %" PRIu32,
			name, ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(MCTP_BTU)),
			MCTP_BTU, buf->size);
		return false;
	}

	/* Check for overlap with the control space */
	if (buf->offset < control_size) {
		mctp_prerr(
			"%s packet buffer overlaps control region {0x%" PRIx32
			", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
			name, 0U, control_size, buf->offset, buf->size);
		return false;
	}

	return true;
}

static bool mctp_astlpc_layout_validate(struct mctp_astlpc_layout *layout)
{
	struct mctp_astlpc_buffer *rx = &layout->rx;
	struct mctp_astlpc_buffer *tx = &layout->tx;
	bool rx_valid, tx_valid;

	rx_valid = mctp_astlpc_buffer_validate(rx, "Rx");
	tx_valid = mctp_astlpc_buffer_validate(tx, "Tx");

	if (!(rx_valid && tx_valid))
		return false;

	/* Check that the buffers are disjoint */
	if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
	    (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
		mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
			   ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
			   rx->offset, rx->size, tx->offset, tx->size);
		return false;
	}

	return true;
}

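/*
 * Worked example of the BMC defaults computed below, assuming the 1MiB window
 * and 0x100-byte control region defined above, a 4-byte MCTP header, and no
 * requested MTU:
 *
 *	raw per-buffer space:	(0x100000 - 0x100) / 2	= 0x7ff80
 *	usable MCTP body:	0x7ff80 - 4 - 4		= 0x7ff78
 *	trimmed to 16 bytes:	0x7ff78 & ~0xf		= 0x7ff70 (the MTU)
 *	resulting buffer use:	0x7ff70 + 4 + 4		= 0x7ff78 bytes
 */
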
static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_lpcmap_hdr hdr = { 0 };
	uint8_t status;
	size_t sz;

	/*
	 * The largest buffer size is half of the allocated MCTP space
	 * excluding the control space.
	 */
	sz = ((LPC_WIN_SIZE - control_size) / 2);

	/*
	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
	 * Query Hop in DSP0236 v1.3.0.
	 */
	sz = MCTP_BODY_SIZE(ASTLPC_BODY_SIZE(sz));
	sz &= ~0xfUL;
	sz = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(sz));

	if (astlpc->requested_mtu) {
		size_t r;

		r = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(astlpc->requested_mtu));
		sz = MIN(sz, r);
	}

	/* Flip the buffers as the names are defined in terms of the host */
	astlpc->layout.tx.offset = control_size;
	astlpc->layout.tx.size = sz;
	astlpc->layout.rx.offset =
		astlpc->layout.tx.offset + astlpc->layout.tx.size;
	astlpc->layout.rx.size = sz;

	if (!mctp_astlpc_layout_validate(&astlpc->layout)) {
		astlpc_prerr(astlpc, "Cannot support an MTU of %zu", sz);
		return -EINVAL;
	}

	hdr = (struct mctp_lpcmap_hdr){
		.magic = htobe32(ASTLPC_MCTP_MAGIC),
		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),

		/* Flip the buffers back as we're now describing the host's
		 * configuration to the host */
		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
		.layout.rx_size = htobe32(astlpc->layout.tx.size),
		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
		.layout.tx_size = htobe32(astlpc->layout.rx.size),
	};

	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));

	/* set status indicating that the BMC is now active */
	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
	return mctp_astlpc_kcs_set_status(astlpc, status);
}

static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
{
	struct mctp_binding_astlpc *astlpc =
		container_of(b, struct mctp_binding_astlpc, binding);

	return mctp_astlpc_init_bmc(astlpc);
}

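/*
 * Version negotiation: each side advertises a [min, cur] range, and the
 * negotiated version is the highest version common to both ranges (see
 * mctp_astlpc_negotiate_version() below). A zero anywhere, an inverted range,
 * or disjoint ranges all fail validation.
 */
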
static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
					 uint16_t bmc_ver_cur,
					 uint16_t host_ver_min,
					 uint16_t host_ver_cur)
{
	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
			   "], [%" PRIu16 ", %" PRIu16 "]",
			   bmc_ver_min, bmc_ver_cur, host_ver_min,
			   host_ver_cur);
		return false;
	} else if (bmc_ver_min > bmc_ver_cur) {
		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
			   "]",
			   bmc_ver_min, bmc_ver_cur);
		return false;
	} else if (host_ver_min > host_ver_cur) {
		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
			   "]",
			   host_ver_min, host_ver_cur);
		return false;
	} else if ((host_ver_cur < bmc_ver_min) ||
		   (host_ver_min > bmc_ver_cur)) {
		mctp_prerr(
			"Unable to satisfy version negotiation with ranges [%" PRIu16
			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
		return false;
	}

	return true;
}

static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout layout;
	uint32_t sz;
	int rc;

	rc = mctp_astlpc_layout_read(astlpc, &layout);
	if (rc < 0)
		return rc;

	if (!mctp_astlpc_layout_validate(&layout)) {
		astlpc_prerr(
			astlpc,
			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
			layout.rx.offset, layout.rx.size, layout.tx.offset,
			layout.tx.size);
		return -EINVAL;
	}

	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
		      astlpc->requested_mtu);

	sz = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(astlpc->requested_mtu));
	layout.rx.size = sz;

	if (!mctp_astlpc_layout_validate(&layout)) {
		astlpc_prerr(
			astlpc,
			"Generated invalid buffer layout with size %" PRIu32
			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
			", %" PRIu32 "}",
			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
			layout.tx.size);
		return -EINVAL;
	}

	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
		      astlpc->requested_mtu);

	return mctp_astlpc_layout_write(astlpc, &layout);
}

static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
{
	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
	uint16_t bmc_ver_min, bmc_ver_cur;
	struct mctp_lpcmap_hdr hdr;
	uint8_t status;
	int rc;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
	if (rc) {
		mctp_prwarn("KCS status read failed");
		return rc;
	}

	astlpc->kcs_status = status;

	if (!(status & KCS_STATUS_BMC_READY))
		return -EHOSTDOWN;

	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));

	bmc_ver_min = be16toh(hdr.bmc_ver_min);
	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);

	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
					  ASTLPC_VER_MIN, ASTLPC_VER_CUR)) {
		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
		return -EINVAL;
	}

	/*
	 * Negotiation always chooses the highest protocol version that
	 * satisfies the version constraints. So check whether the BMC supports
	 * v2, and if so, negotiate in v2 style.
	 */
	if (ASTLPC_VER_CUR >= 2 && bmc_ver_cur >= 2) {
		rc = mctp_astlpc_negotiate_layout_host(astlpc);
		if (rc < 0)
			return rc;
	}

	/* Version negotiation */
	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
			      sizeof(ver_min_be));

	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
			      sizeof(ver_cur_be));

	/* Send channel init command */
	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS write failed");
	}

	return rc;
}

static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
{
	struct mctp_binding_astlpc *astlpc =
		container_of(b, struct mctp_binding_astlpc, binding);

	return mctp_astlpc_init_host(astlpc);
}

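/*
 * KCS readiness as implemented by __mctp_astlpc_kcs_ready() below (register
 * names follow the usual KCS convention of ODR for BMC-to-host data and IDR
 * for host-to-BMC data):
 *
 *	role	operation	flag tested	ready when
 *	BMC	write (ODR)	OBF		clear (host consumed ODR)
 *	BMC	read  (IDR)	IBF		set   (host filled IDR)
 *	host	write (IDR)	IBF		clear (BMC consumed IDR)
 *	host	read  (ODR)	OBF		set   (BMC filled ODR)
 */
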
static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
				    uint8_t status, bool is_write)
{
	bool is_bmc;
	bool ready_state;
	uint8_t flag;

	is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
	flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
	ready_state = is_write ? 0 : 1;

	return !!(status & flag) == ready_state;
}

static inline bool
mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
{
	return __mctp_astlpc_kcs_ready(astlpc, status, false);
}

static inline bool
mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
{
	return __mctp_astlpc_kcs_ready(astlpc, status, true);
}

static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
				uint8_t data)
{
	uint8_t status;
	int rc;

	for (;;) {
		rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS,
					  &status);
		if (rc) {
			astlpc_prwarn(astlpc, "KCS status read failed");
			return -1;
		}
		if (mctp_astlpc_kcs_write_ready(astlpc, status))
			break;
		/* todo: timeout */
	}

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS data write failed");
		return -1;
	}

	return 0;
}

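/*
 * Values passed through the KCS data register, as used by
 * mctp_binding_astlpc_tx(), mctp_astlpc_rx_start() and mctp_astlpc_poll():
 *
 *	0x00	channel init request (host to BMC)
 *	0x01	a packet has been written to the sender's Tx buffer
 *	0x02	the receiver has finished with the peer's packet
 *	0xff	dummy write accompanying a status-register update
 */
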
static int mctp_binding_astlpc_tx(struct mctp_binding *b,
				  struct mctp_pktbuf *pkt)
{
	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
	uint32_t len, len_be;
	struct mctp_hdr *hdr;

	hdr = mctp_pktbuf_hdr(pkt);
	len = mctp_pktbuf_size(pkt);

	astlpc_prdebug(astlpc,
		       "%s: Transmitting %" PRIu32
		       "-byte packet (%hhu, %hhu, 0x%hhx)",
		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);

	if (len > ASTLPC_BODY_SIZE(astlpc->layout.tx.size)) {
		astlpc_prwarn(astlpc, "invalid TX len 0x%x", len);
		return -1;
	}

	len_be = htobe32(len);
	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
			      sizeof(len_be));
	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);

	mctp_binding_set_tx_enabled(b, false);

	mctp_astlpc_kcs_send(astlpc, 0x1);
	return 0;
}

static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
					      uint16_t bmc_ver_cur,
					      uint16_t host_ver_min,
					      uint16_t host_ver_cur)
{
	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
					  host_ver_min, host_ver_cur))
		return ASTLPC_VER_BAD;

	if (bmc_ver_cur < host_ver_cur)
		return bmc_ver_cur;

	return host_ver_cur;
}

static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
					  struct mctp_astlpc_layout *layout)
{
	uint32_t low, high, limit;

	/* Derive the largest MTU the BMC _can_ support */
	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
	limit = high - low;

	/* Determine the largest MTU the BMC _wants_ to support */
	if (astlpc->requested_mtu) {
		uint32_t req = astlpc->requested_mtu;

		limit = MIN(limit, ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(req)));
	}

	/* Determine the accepted MTU, applied both directions by convention */
	return MCTP_BODY_SIZE(ASTLPC_BODY_SIZE(MIN(limit, layout->tx.size)));
}

static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout proposed, pending;
	uint32_t sz, mtu;
	int rc;

	/* Extract the host's proposed layout */
	rc = mctp_astlpc_layout_read(astlpc, &proposed);
	if (rc < 0)
		return rc;

	if (!mctp_astlpc_layout_validate(&proposed))
		return -EINVAL;

	/* Negotiate the MTU */
	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
	sz = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(mtu));

	/*
	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
	 * functions
	 */
	pending = astlpc->layout;
	pending.tx.size = sz;
	pending.rx.size = sz;

	if (mctp_astlpc_layout_validate(&pending)) {
		/* We found a sensible Rx MTU, so honour it */
		astlpc->layout = pending;

		/* Enforce the negotiated MTU */
		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
		if (rc < 0)
			return rc;

		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
			      mtu);
	} else {
		astlpc_prwarn(astlpc, "MTU negotiation failed");
		return -EINVAL;
	}

	if (astlpc->version >= 2)
		astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);

	return 0;
}

static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
{
	uint16_t negotiated, negotiated_be;
	struct mctp_lpcmap_hdr hdr;
	uint8_t status;
	int rc;

	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));

	/* Version negotiation */
	negotiated =
		mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
					      be16toh(hdr.host_ver_min),
					      be16toh(hdr.host_ver_cur));

	/*
	 * Layout negotiation depends on the negotiated protocol version, so
	 * record it before deriving the MTU.
	 */
	astlpc->version = negotiated;

	/* Host Rx MTU negotiation: Failure terminates channel init */
	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
	if (rc < 0)
		negotiated = ASTLPC_VER_BAD;

	/* Populate the negotiated version */
	astlpc->version = negotiated;
	negotiated_be = htobe16(negotiated);
	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
			      sizeof(negotiated_be));

	/* Finalise the configuration */
	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
	if (negotiated > 0) {
		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
			      negotiated);
		status |= KCS_STATUS_CHANNEL_ACTIVE;
	} else {
		astlpc_prerr(astlpc, "Failed to initialise channel\n");
	}

	mctp_astlpc_kcs_set_status(astlpc, status);

	mctp_binding_set_tx_enabled(&astlpc->binding,
				    status & KCS_STATUS_CHANNEL_ACTIVE);
}

static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_pktbuf *pkt;
	uint32_t len;

	mctp_astlpc_lpc_read(astlpc, &len, astlpc->layout.rx.offset,
			     sizeof(len));
	len = be32toh(len);

	if (len > ASTLPC_BODY_SIZE(astlpc->layout.rx.size)) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", len);
		return;
	}

	assert(astlpc->binding.pkt_size >= 0);
	if (len > (uint32_t)astlpc->binding.pkt_size) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", len);
		return;
	}

	pkt = mctp_pktbuf_alloc(&astlpc->binding, len);
	if (!pkt)
		goto out_complete;

	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
			     astlpc->layout.rx.offset + 4, len);

	mctp_bus_rx(&astlpc->binding, pkt);

out_complete:
	mctp_astlpc_kcs_send(astlpc, 0x2);
}

static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
{
	mctp_binding_set_tx_enabled(&astlpc->binding, true);
}

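/*
 * Host-side finalisation: once the BMC raises KCS_STATUS_CHANNEL_ACTIVE, the
 * host re-reads the negotiated version and the final buffer layout from the
 * control region and, for v2 and later, sizes its packets from the negotiated
 * Tx buffer.
 */
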
static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout layout;
	uint16_t negotiated;
	int rc;

	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
				  offsetof(struct mctp_lpcmap_hdr,
					   negotiated_ver),
				  sizeof(negotiated));
	if (rc < 0)
		return rc;

	negotiated = be16toh(negotiated);

	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
	    negotiated > ASTLPC_VER_CUR) {
		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
			     negotiated);
		return -EINVAL;
	}

	astlpc->version = negotiated;

	rc = mctp_astlpc_layout_read(astlpc, &layout);
	if (rc < 0)
		return rc;

	if (!mctp_astlpc_layout_validate(&layout)) {
		mctp_prerr("BMC proposed invalid buffer parameters");
		return -EINVAL;
	}

	astlpc->layout = layout;

	if (negotiated >= 2)
		astlpc->binding.pkt_size =
			ASTLPC_BODY_SIZE(astlpc->layout.tx.size);

	return 0;
}

static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
				      uint8_t status)
{
	uint8_t updated;
	int rc = 0;

	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

	updated = astlpc->kcs_status ^ status;

	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
		       status, updated);

	if (updated & KCS_STATUS_BMC_READY) {
		if (status & KCS_STATUS_BMC_READY) {
			astlpc->kcs_status = status;
			return astlpc->binding.start(&astlpc->binding);
		} else {
			mctp_binding_set_tx_enabled(&astlpc->binding, false);
		}
	}

	if (astlpc->version == 0 || updated & KCS_STATUS_CHANNEL_ACTIVE) {
		bool enable;

		rc = mctp_astlpc_finalise_channel(astlpc);
		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;

		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
	}

	astlpc->kcs_status = status;

	return rc;
}

int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
{
	uint8_t status, data;
	int rc;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);

	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
		return 0;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS data read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);

	switch (data) {
	case 0x0:
		mctp_astlpc_init_channel(astlpc);
		break;
	case 0x1:
		mctp_astlpc_rx_start(astlpc);
		break;
	case 0x2:
		mctp_astlpc_tx_complete(astlpc);
		break;
	case 0xff:
		/* No responsibilities for the BMC on 0xff */
		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
			rc = mctp_astlpc_update_channel(astlpc, status);
			if (rc < 0)
				return rc;
		}
		break;
	default:
		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
	}

	/* Handle silent loss of bmc-ready */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
		if (!(status & KCS_STATUS_BMC_READY && data == 0xff))
			return mctp_astlpc_update_channel(astlpc, status);
	}

	return rc;
}

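/*
 * A minimal sketch of how a BMC-side daemon might drive this binding with the
 * file IO backend. This assumes the core mctp_init()/mctp_register_bus() API
 * and poll(2); the EID is arbitrary and error handling is elided:
 *
 *	struct mctp_binding_astlpc *astlpc = mctp_astlpc_init_fileio();
 *	struct mctp *mctp = mctp_init();
 *
 *	mctp_register_bus(mctp, mctp_binding_astlpc_core(astlpc), 8);
 *
 *	for (;;) {
 *		struct pollfd pfd = {
 *			.fd = mctp_astlpc_get_fd(astlpc),
 *			.events = POLLIN,
 *		};
 *
 *		poll(&pfd, 1, -1);
 *		if (pfd.revents & POLLIN)
 *			mctp_astlpc_poll(astlpc);
 *	}
 */
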
/* allocate and basic initialisation */
static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
						      uint32_t mtu)
{
	struct mctp_binding_astlpc *astlpc;

	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));

	astlpc = __mctp_alloc(sizeof(*astlpc));
	if (!astlpc)
		return NULL;

	memset(astlpc, 0, sizeof(*astlpc));
	astlpc->mode = mode;
	astlpc->lpc_map = NULL;
	astlpc->requested_mtu = mtu;
	astlpc->binding.name = "astlpc";
	astlpc->binding.version = 1;
	astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);
	astlpc->binding.pkt_pad = 0;
	astlpc->binding.tx = mctp_binding_astlpc_tx;
	if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
		astlpc->binding.start = mctp_binding_astlpc_start_bmc;
	else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
		astlpc->binding.start = mctp_binding_astlpc_start_host;
	else {
		astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode);
		__mctp_free(astlpc);
		return NULL;
	}

	return astlpc;
}

struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
{
	return &b->binding;
}

struct mctp_binding_astlpc *
mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
		 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
{
	struct mctp_binding_astlpc *astlpc;

	if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
	      mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
		mctp_prerr("Unknown binding mode: %u", mode);
		return NULL;
	}

	astlpc = __mctp_astlpc_init(mode, mtu);
	if (!astlpc)
		return NULL;

	memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
	astlpc->ops_data = ops_data;
	astlpc->lpc_map = lpc_map;
	astlpc->mode = mode;

	return astlpc;
}

struct mctp_binding_astlpc *
mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
		     void *lpc_map)
{
	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
				ops, ops_data);
}

void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
{
	/* Clear channel-active and bmc-ready */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
		mctp_astlpc_kcs_set_status(astlpc, KCS_STATUS_OBF);
	__mctp_free(astlpc);
}

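/*
 * A sketch of the direct-ops path for environments without Linux file IO (for
 * example host firmware). The register accessors here are hypothetical
 * placeholders for whatever the platform provides:
 *
 *	static int my_kcs_read(void *data,
 *			       enum mctp_binding_astlpc_kcs_reg reg,
 *			       uint8_t *val);
 *	static int my_kcs_write(void *data,
 *				enum mctp_binding_astlpc_kcs_reg reg,
 *				uint8_t val);
 *
 *	static const struct mctp_binding_astlpc_ops ops = {
 *		.kcs_read = my_kcs_read,
 *		.kcs_write = my_kcs_write,
 *	};
 *
 *	astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
 *				  window, &ops, NULL);
 *
 * Passing a non-NULL window pointer uses the direct memcpy() path in
 * mctp_astlpc_lpc_read()/mctp_astlpc_lpc_write(); alternatively, lpc_read and
 * lpc_write ops can be supplied and the window pointer left NULL.
 */
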
#ifdef MCTP_HAVE_FILEIO

static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
{
	struct aspeed_lpc_ctrl_mapping map = {
		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
		.window_id = 0, /* There's only one */
		.flags = 0,
		.addr = 0,
		.offset = 0,
		.size = 0
	};
	void *lpc_map_base;
	int fd, rc;

	fd = open(lpc_path, O_RDWR | O_SYNC);
	if (fd < 0) {
		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
		return -1;
	}

	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
	if (rc) {
		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
		close(fd);
		return -1;
	}

	lpc_map_base =
		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (lpc_map_base == MAP_FAILED) {
		astlpc_prwarn(astlpc, "LPC mmap failed");
		rc = -1;
	} else {
		astlpc->lpc_map = lpc_map_base + map.size - LPC_WIN_SIZE;
	}

	close(fd);

	return rc;
}

static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc)
{
	astlpc->kcs_fd = open(kcs_path, O_RDWR);
	if (astlpc->kcs_fd < 0)
		return -1;

	return 0;
}

static int __mctp_astlpc_fileio_kcs_read(void *arg,
					 enum mctp_binding_astlpc_kcs_reg reg,
					 uint8_t *val)
{
	struct mctp_binding_astlpc *astlpc = arg;
	off_t offset = reg;
	int rc;

	rc = pread(astlpc->kcs_fd, val, 1, offset);

	return rc == 1 ? 0 : -1;
}

static int __mctp_astlpc_fileio_kcs_write(void *arg,
					  enum mctp_binding_astlpc_kcs_reg reg,
					  uint8_t val)
{
	struct mctp_binding_astlpc *astlpc = arg;
	off_t offset = reg;
	int rc;

	rc = pwrite(astlpc->kcs_fd, &val, 1, offset);

	return rc == 1 ? 0 : -1;
}

int mctp_astlpc_get_fd(struct mctp_binding_astlpc *astlpc)
{
	return astlpc->kcs_fd;
}

struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
{
	struct mctp_binding_astlpc *astlpc;
	int rc;

	/*
	 * If we're doing file IO then we're very likely not running
	 * freestanding, so let's assume that we're on the BMC side
	 */
	astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU);
	if (!astlpc)
		return NULL;

	/* Set internal operations for kcs. We use direct accesses to the lpc
	 * map area */
	astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
	astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
	astlpc->ops_data = astlpc;

	rc = mctp_astlpc_init_fileio_lpc(astlpc);
	if (rc) {
		__mctp_free(astlpc);
		return NULL;
	}

	rc = mctp_astlpc_init_fileio_kcs(astlpc);
	if (rc) {
		__mctp_free(astlpc);
		return NULL;
	}

	return astlpc;
}
#else
struct mctp_binding_astlpc *__attribute__((const))
mctp_astlpc_init_fileio(void)
{
	mctp_prerr("Missing support for file IO");
	return NULL;
}

int __attribute__((const)) mctp_astlpc_get_fd(
	struct mctp_binding_astlpc *astlpc __attribute__((unused)))
{
	mctp_prerr("Missing support for file IO");
	return -1;
}
#endif