1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */ 2 3 #ifdef HAVE_CONFIG_H 4 #include "config.h" 5 #endif 6 7 #define ASTLPC_VER_CUR 2 8 #include "astlpc.c" 9 10 #ifdef pr_fmt 11 #undef pr_fmt 12 #define pr_fmt(x) "test: " x 13 #endif 14 15 #include "libmctp-astlpc.h" 16 #include "libmctp-log.h" 17 #include "container_of.h" 18 19 #ifdef NDEBUG 20 #undef NDEBUG 21 #endif 22 23 #include <assert.h> 24 #include <limits.h> 25 #include <stdint.h> 26 #include <stdio.h> 27 #include <stdlib.h> 28 #include <string.h> 29 30 #ifndef ARRAY_SIZE 31 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) 32 #endif 33 34 struct mctp_binding_astlpc_mmio { 35 struct mctp_binding_astlpc astlpc; 36 bool bmc; 37 38 uint8_t (*kcs)[2]; 39 40 size_t lpc_size; 41 uint8_t *lpc; 42 }; 43 44 struct astlpc_endpoint { 45 struct mctp_binding_astlpc_mmio mmio; 46 struct mctp_binding_astlpc *astlpc; 47 struct mctp *mctp; 48 }; 49 50 struct astlpc_test { 51 struct astlpc_endpoint bmc; 52 struct astlpc_endpoint host; 53 uint8_t kcs[2]; 54 uint8_t *lpc_mem; 55 56 void *msg; 57 uint8_t count; 58 }; 59 60 #define binding_to_mmio(b) \ 61 container_of(b, struct mctp_binding_astlpc_mmio, astlpc) 62 63 static int mctp_astlpc_mmio_kcs_read(void *data, 64 enum mctp_binding_astlpc_kcs_reg reg, 65 uint8_t *val) 66 { 67 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 68 69 *val = (*mmio->kcs)[reg]; 70 71 mctp_prdebug("%s: 0x%hhx from %s", __func__, *val, 72 reg ? "status" : "data"); 73 74 if (reg == MCTP_ASTLPC_KCS_REG_DATA) { 75 uint8_t flag = mmio->bmc ? KCS_STATUS_IBF : KCS_STATUS_OBF; 76 (*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] &= ~flag; 77 } 78 79 return 0; 80 } 81 82 static int mctp_astlpc_mmio_kcs_write(void *data, 83 enum mctp_binding_astlpc_kcs_reg reg, 84 uint8_t val) 85 { 86 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 87 uint8_t *regp; 88 89 assert(reg == MCTP_ASTLPC_KCS_REG_DATA || 90 reg == MCTP_ASTLPC_KCS_REG_STATUS); 91 92 if (reg == MCTP_ASTLPC_KCS_REG_DATA) { 93 uint8_t flag = mmio->bmc ? KCS_STATUS_OBF : KCS_STATUS_IBF; 94 (*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] |= flag; 95 } 96 97 regp = &(*mmio->kcs)[reg]; 98 if (reg == MCTP_ASTLPC_KCS_REG_STATUS) 99 *regp = (val & ~0xbU) | (val & *regp & 1); 100 else 101 *regp = val; 102 103 mctp_prdebug("%s: 0x%hhx to %s", __func__, val, 104 reg ? "status" : "data"); 105 106 return 0; 107 } 108 int mctp_astlpc_mmio_lpc_read(void *data, void *buf, long offset, size_t len) 109 { 110 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 111 112 mctp_prdebug("%s: %zu bytes from 0x%lx", __func__, len, offset); 113 114 assert(offset >= 0L); 115 assert(offset + len < mmio->lpc_size); 116 117 memcpy(buf, mmio->lpc + offset, len); 118 119 return 0; 120 } 121 122 int mctp_astlpc_mmio_lpc_write(void *data, const void *buf, long offset, 123 size_t len) 124 { 125 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 126 127 mctp_prdebug("%s: %zu bytes to 0x%lx", __func__, len, offset); 128 129 assert(offset >= 0L); 130 assert(offset + len < mmio->lpc_size); 131 132 memcpy(mmio->lpc + offset, buf, len); 133 134 return 0; 135 } 136 137 #define __unused __attribute__((unused)) 138 139 static void rx_message(uint8_t eid __unused, void *data __unused, void *msg, 140 size_t len) 141 { 142 struct astlpc_test *test = data; 143 144 mctp_prdebug("MCTP message received: msg: %p, len %zd", msg, len); 145 146 assert(len > 0); 147 assert(msg); 148 assert(test); 149 assert(test->msg); 150 assert(!memcmp(test->msg, msg, len)); 151 152 test->count++; 153 } 154 155 static const struct mctp_binding_astlpc_ops astlpc_direct_mmio_ops = { 156 .kcs_read = mctp_astlpc_mmio_kcs_read, 157 .kcs_write = mctp_astlpc_mmio_kcs_write, 158 }; 159 160 static const struct mctp_binding_astlpc_ops astlpc_indirect_mmio_ops = { 161 .kcs_read = mctp_astlpc_mmio_kcs_read, 162 .kcs_write = mctp_astlpc_mmio_kcs_write, 163 .lpc_read = mctp_astlpc_mmio_lpc_read, 164 .lpc_write = mctp_astlpc_mmio_lpc_write, 165 }; 166 167 static int endpoint_init(struct astlpc_endpoint *ep, mctp_eid_t eid, 168 uint8_t mode, uint32_t mtu, uint8_t (*kcs)[2], 169 void *lpc_mem) 170 { 171 /* 172 * Configure the direction of the KCS interface so we know whether to 173 * set or clear IBF or OBF on writes or reads. 174 */ 175 ep->mmio.bmc = (mode == MCTP_BINDING_ASTLPC_MODE_BMC); 176 177 ep->mctp = mctp_init(); 178 assert(ep->mctp); 179 180 /* Inject KCS registers */ 181 ep->mmio.kcs = kcs; 182 183 /* Initialise the binding */ 184 ep->astlpc = mctp_astlpc_init(mode, mtu, lpc_mem, 185 &astlpc_direct_mmio_ops, &ep->mmio); 186 assert(ep->astlpc); 187 188 return mctp_register_bus(ep->mctp, &ep->astlpc->binding, eid); 189 } 190 191 static void endpoint_destroy(struct astlpc_endpoint *ep) 192 { 193 mctp_astlpc_destroy(ep->astlpc); 194 mctp_destroy(ep->mctp); 195 } 196 197 static void network_init(struct astlpc_test *ctx) 198 { 199 int rc; 200 201 ctx->lpc_mem = calloc(1, 1 * 1024 * 1024); 202 assert(ctx->lpc_mem); 203 204 /* BMC initialisation */ 205 rc = endpoint_init(&ctx->bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 206 &ctx->kcs, ctx->lpc_mem); 207 assert(!rc); 208 assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY); 209 210 /* Host initialisation */ 211 rc = endpoint_init(&ctx->host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 212 MCTP_BTU, &ctx->kcs, ctx->lpc_mem); 213 assert(!rc); 214 215 /* BMC processes host channel init request, alerts host */ 216 mctp_astlpc_poll(ctx->bmc.astlpc); 217 assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & 218 KCS_STATUS_CHANNEL_ACTIVE); 219 assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff); 220 221 /* Host dequeues channel init result */ 222 mctp_astlpc_poll(ctx->host.astlpc); 223 } 224 225 static void network_destroy(struct astlpc_test *ctx) 226 { 227 endpoint_destroy(&ctx->bmc); 228 endpoint_destroy(&ctx->host); 229 free(ctx->lpc_mem); 230 } 231 232 static void astlpc_assert_tx_packet(struct astlpc_endpoint *src, 233 const void *expected, size_t len) 234 { 235 const size_t tx_body = src->astlpc->layout.tx.offset + 4 + 4; 236 const void *test = ((char *)src->astlpc->lpc_map) + tx_body; 237 assert(!memcmp(test, expected, len)); 238 } 239 240 static void astlpc_test_packetised_message_bmc_to_host(void) 241 { 242 struct astlpc_test ctx = { 0 }; 243 uint8_t msg[2 * MCTP_BTU]; 244 int rc; 245 246 /* Test harness initialisation */ 247 248 network_init(&ctx); 249 250 memset(&msg[0], 0x5a, MCTP_BTU); 251 memset(&msg[MCTP_BTU], 0xa5, MCTP_BTU); 252 253 ctx.msg = &msg[0]; 254 ctx.count = 0; 255 mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx); 256 257 /* BMC sends a message */ 258 rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg)); 259 assert(rc == 0); 260 261 /* Host receives the first packet */ 262 mctp_astlpc_poll(ctx.host.astlpc); 263 264 /* BMC dequeues ownership hand-over and sends the queued packet */ 265 rc = mctp_astlpc_poll(ctx.bmc.astlpc); 266 assert(rc == 0); 267 268 /* Host receives the next packet */ 269 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 270 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01); 271 272 astlpc_assert_tx_packet(&ctx.bmc, &msg[MCTP_BTU], MCTP_BTU); 273 274 /* Host receives final packet */ 275 mctp_astlpc_poll(ctx.host.astlpc); 276 assert(ctx.count == 1); 277 278 network_destroy(&ctx); 279 } 280 281 static void astlpc_test_simple_message_host_to_bmc(void) 282 { 283 struct astlpc_test ctx = { 0 }; 284 uint8_t msg[MCTP_BTU]; 285 int rc; 286 287 /* Test harness initialisation */ 288 289 network_init(&ctx); 290 291 memset(&msg[0], 0xa5, MCTP_BTU); 292 293 ctx.msg = &msg[0]; 294 ctx.count = 0; 295 mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx); 296 297 /* Host sends the single-packet message */ 298 rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg)); 299 assert(rc == 0); 300 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF); 301 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01); 302 303 astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU); 304 305 /* BMC receives the single-packet message */ 306 mctp_astlpc_poll(ctx.bmc.astlpc); 307 assert(ctx.count == 1); 308 309 /* BMC returns Tx area ownership to Host */ 310 assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF)); 311 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02); 312 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 313 314 /* Host dequeues ownership hand-over and sends the queued packet */ 315 rc = mctp_astlpc_poll(ctx.host.astlpc); 316 assert(rc == 0); 317 318 network_destroy(&ctx); 319 } 320 321 static void astlpc_test_simple_message_bmc_to_host(void) 322 { 323 struct astlpc_test ctx = { 0 }; 324 uint8_t msg[MCTP_BTU]; 325 int rc; 326 327 /* Test harness initialisation */ 328 329 network_init(&ctx); 330 331 memset(&msg[0], 0x5a, MCTP_BTU); 332 333 ctx.msg = &msg[0]; 334 ctx.count = 0; 335 mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx); 336 337 /* BMC sends the single-packet message */ 338 rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg)); 339 assert(rc == 0); 340 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 341 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01); 342 343 astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU); 344 345 /* Host receives the single-packet message */ 346 mctp_astlpc_poll(ctx.host.astlpc); 347 assert(ctx.count == 1); 348 349 /* Host returns Rx area ownership to BMC */ 350 assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF)); 351 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02); 352 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF); 353 354 /* BMC dequeues ownership hand-over and sends the queued packet */ 355 rc = mctp_astlpc_poll(ctx.bmc.astlpc); 356 assert(rc == 0); 357 358 network_destroy(&ctx); 359 } 360 361 static void astlpc_test_host_before_bmc(void) 362 { 363 struct mctp_binding_astlpc_mmio mmio = { 0 }; 364 struct mctp_binding_astlpc *astlpc; 365 uint8_t kcs[2] = { 0 }; 366 struct mctp *mctp; 367 int rc; 368 369 mctp = mctp_init(); 370 assert(mctp); 371 372 /* Inject KCS registers */ 373 mmio.kcs = &kcs; 374 375 /* Initialise the binding */ 376 astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL, 377 &astlpc_direct_mmio_ops, &mmio); 378 379 /* Register the binding to trigger the start-up sequence */ 380 rc = mctp_register_bus(mctp, &astlpc->binding, 8); 381 382 /* Start-up should fail as we haven't initialised the BMC */ 383 assert(rc < 0); 384 385 mctp_astlpc_destroy(astlpc); 386 mctp_destroy(mctp); 387 } 388 389 static void astlpc_test_bad_version(void) 390 { 391 assert(0 == 392 mctp_astlpc_negotiate_version(ASTLPC_VER_BAD, ASTLPC_VER_CUR, 393 ASTLPC_VER_MIN, ASTLPC_VER_CUR)); 394 assert(0 == 395 mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_BAD, 396 ASTLPC_VER_MIN, ASTLPC_VER_CUR)); 397 assert(0 == 398 mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR, 399 ASTLPC_VER_BAD, ASTLPC_VER_CUR)); 400 assert(0 == 401 mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR, 402 ASTLPC_VER_MIN, ASTLPC_VER_BAD)); 403 assert(0 == mctp_astlpc_negotiate_version( 404 ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR, ASTLPC_VER_MIN, 405 ASTLPC_VER_CUR + 1)); 406 assert(0 == mctp_astlpc_negotiate_version( 407 ASTLPC_VER_MIN, ASTLPC_VER_CUR + 1, 408 ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR)); 409 } 410 411 static void astlpc_test_incompatible_versions(void) 412 { 413 assert(0 == mctp_astlpc_negotiate_version( 414 ASTLPC_VER_CUR, ASTLPC_VER_CUR, ASTLPC_VER_CUR + 1, 415 ASTLPC_VER_CUR + 1)); 416 assert(0 == mctp_astlpc_negotiate_version( 417 ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR + 1, 418 ASTLPC_VER_CUR, ASTLPC_VER_CUR)); 419 } 420 421 static void astlpc_test_choose_bmc_ver_cur(void) 422 { 423 assert(2 == mctp_astlpc_negotiate_version(1, 2, 2, 3)); 424 } 425 426 static void astlpc_test_choose_host_ver_cur(void) 427 { 428 assert(2 == mctp_astlpc_negotiate_version(2, 3, 1, 2)); 429 } 430 431 static void astlpc_test_version_host_fails_negotiation(void) 432 { 433 struct astlpc_endpoint bmc, host; 434 struct mctp_lpcmap_hdr *hdr; 435 uint8_t kcs[2] = { 0 }; 436 void *lpc_mem; 437 int rc; 438 439 /* Test harness initialisation */ 440 lpc_mem = calloc(1, 1 * 1024 * 1024); 441 assert(lpc_mem); 442 443 /* BMC initialisation */ 444 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 445 &kcs, lpc_mem); 446 assert(!rc); 447 448 /* Now the BMC is initialised, break its version announcement */ 449 hdr = lpc_mem; 450 hdr->bmc_ver_cur = ASTLPC_VER_BAD; 451 452 /* Host initialisation */ 453 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 454 &kcs, lpc_mem); 455 assert(rc < 0); 456 457 endpoint_destroy(&bmc); 458 endpoint_destroy(&host); 459 free(lpc_mem); 460 } 461 462 static void astlpc_test_version_bmc_fails_negotiation(void) 463 { 464 struct astlpc_endpoint bmc, host; 465 struct mctp_lpcmap_hdr *hdr; 466 uint8_t kcs[2] = { 0 }; 467 void *lpc_mem; 468 int rc; 469 470 /* Test harness initialisation */ 471 lpc_mem = calloc(1, 1 * 1024 * 1024); 472 assert(lpc_mem); 473 474 /* BMC initialisation */ 475 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 476 &kcs, lpc_mem); 477 assert(!rc); 478 479 /* Host initialisation */ 480 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 481 &kcs, lpc_mem); 482 assert(!rc); 483 484 /* Now the host is initialised, break its version announcement */ 485 hdr = lpc_mem; 486 hdr->host_ver_cur = ASTLPC_VER_BAD; 487 488 /* Poll the BMC to detect the broken host version */ 489 mctp_astlpc_poll(bmc.astlpc); 490 assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE)); 491 492 /* Poll the host so it detects failed negotiation */ 493 rc = mctp_astlpc_poll(host.astlpc); 494 assert(rc < 0); 495 496 endpoint_destroy(&bmc); 497 endpoint_destroy(&host); 498 free(lpc_mem); 499 } 500 501 static void astlpc_test_simple_init(void) 502 { 503 struct astlpc_endpoint bmc, host; 504 uint8_t kcs[2] = { 0 }; 505 void *lpc_mem; 506 int rc; 507 508 /* Test harness initialisation */ 509 lpc_mem = calloc(1, 1 * 1024 * 1024); 510 assert(lpc_mem); 511 512 /* BMC initialisation */ 513 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 514 &kcs, lpc_mem); 515 assert(!rc); 516 517 /* Verify the BMC binding was initialised */ 518 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY); 519 520 /* Host initialisation */ 521 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 522 &kcs, lpc_mem); 523 assert(!rc); 524 525 /* Host sends channel init command */ 526 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF); 527 assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x00); 528 529 /* BMC receives host channel init request */ 530 mctp_astlpc_poll(bmc.astlpc); 531 532 /* BMC sends init response */ 533 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 534 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE); 535 assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff); 536 537 /* Host dequeues data */ 538 mctp_astlpc_poll(host.astlpc); 539 540 endpoint_destroy(&bmc); 541 endpoint_destroy(&host); 542 free(lpc_mem); 543 } 544 545 static void astlpc_test_simple_indirect_message_bmc_to_host(void) 546 { 547 struct astlpc_test ctx = { 0 }; 548 uint8_t kcs[2] = { 0 }; 549 uint8_t msg[MCTP_BTU]; 550 int rc; 551 552 ctx.lpc_mem = calloc(1, LPC_WIN_SIZE); 553 assert(ctx.lpc_mem); 554 555 /* Test message data */ 556 memset(&msg[0], 0x5a, MCTP_BTU); 557 558 /* Manually set up the network so we can inject the indirect ops */ 559 560 /* BMC initialisation */ 561 ctx.bmc.mmio.bmc = true; 562 ctx.bmc.mctp = mctp_init(); 563 assert(ctx.bmc.mctp); 564 ctx.bmc.mmio.kcs = &kcs; 565 ctx.bmc.mmio.lpc = ctx.lpc_mem; 566 ctx.bmc.mmio.lpc_size = LPC_WIN_SIZE; 567 ctx.bmc.astlpc = 568 mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, NULL, 569 &astlpc_indirect_mmio_ops, &ctx.bmc.mmio); 570 mctp_register_bus(ctx.bmc.mctp, &ctx.bmc.astlpc->binding, 8); 571 572 /* Host initialisation */ 573 ctx.host.mmio.bmc = false; 574 ctx.host.mctp = mctp_init(); 575 assert(ctx.host.mctp); 576 ctx.host.mmio.kcs = &kcs; 577 ctx.host.mmio.lpc = ctx.lpc_mem; 578 ctx.host.mmio.lpc_size = LPC_WIN_SIZE; 579 ctx.host.astlpc = 580 mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL, 581 &astlpc_indirect_mmio_ops, &ctx.host.mmio); 582 mctp_register_bus(ctx.host.mctp, &ctx.host.astlpc->binding, 9); 583 584 /* BMC processes host channel init request, alerts host */ 585 mctp_astlpc_poll(ctx.bmc.astlpc); 586 587 /* Host dequeues channel init result */ 588 mctp_astlpc_poll(ctx.host.astlpc); 589 590 ctx.msg = &msg[0]; 591 ctx.count = 0; 592 mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx); 593 594 /* BMC sends the single-packet message */ 595 rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg)); 596 assert(rc == 0); 597 598 /* Host receives the single-packet message */ 599 rc = mctp_astlpc_poll(ctx.host.astlpc); 600 assert(rc == 0); 601 assert(ctx.count == 1); 602 603 /* BMC dequeues ownership hand-over and sends the queued packet */ 604 rc = mctp_astlpc_poll(ctx.bmc.astlpc); 605 assert(rc == 0); 606 607 /* Can still tear-down the network in the normal fashion */ 608 network_destroy(&ctx); 609 } 610 611 static void astlpc_test_host_tx_bmc_gone(void) 612 { 613 struct astlpc_test ctx = { 0 }; 614 uint8_t unwritten[MCTP_BTU]; 615 uint8_t msg[MCTP_BTU]; 616 int rc; 617 618 /* Test harness initialisation */ 619 620 network_init(&ctx); 621 622 memset(&msg[0], 0x5a, sizeof(msg)); 623 memset(&unwritten[0], 0, sizeof(unwritten)); 624 625 ctx.msg = &msg[0]; 626 ctx.count = 0; 627 628 /* Clear bmc-ready */ 629 endpoint_destroy(&ctx.bmc); 630 631 /* Host detects that the BMC is disabled */ 632 mctp_astlpc_poll(ctx.host.astlpc); 633 634 /* Host attempts to send the single-packet message, but is prevented */ 635 rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg)); 636 assert(rc == 0); 637 assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF)); 638 astlpc_assert_tx_packet(&ctx.host, &unwritten[0], MCTP_BTU); 639 640 /* BMC comes back */ 641 rc = endpoint_init(&ctx.bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 642 &ctx.kcs, ctx.lpc_mem); 643 assert(!rc); 644 mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx); 645 646 /* Host triggers channel init */ 647 mctp_astlpc_poll(ctx.host.astlpc); 648 649 /* BMC handles channel init */ 650 mctp_astlpc_poll(ctx.bmc.astlpc); 651 652 /* Host completes channel init, flushing the Tx queue */ 653 mctp_astlpc_poll(ctx.host.astlpc); 654 655 /* BMC receives the single-packet message */ 656 mctp_astlpc_poll(ctx.bmc.astlpc); 657 assert(ctx.count == 1); 658 659 network_destroy(&ctx); 660 } 661 662 static void astlpc_test_poll_not_ready(void) 663 { 664 struct astlpc_endpoint bmc; 665 uint8_t kcs[2] = { 0 }; 666 void *lpc_mem; 667 int rc; 668 669 /* Test harness initialisation */ 670 lpc_mem = calloc(1, 1 * 1024 * 1024); 671 assert(lpc_mem); 672 673 /* BMC initialisation */ 674 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 675 &kcs, lpc_mem); 676 assert(!rc); 677 678 /* Check for a command despite none present */ 679 rc = mctp_astlpc_poll(bmc.astlpc); 680 681 /* Make sure it doesn't fail */ 682 assert(rc == 0); 683 684 endpoint_destroy(&bmc); 685 free(lpc_mem); 686 } 687 688 static void astlpc_test_undefined_command(void) 689 { 690 struct astlpc_endpoint bmc; 691 uint8_t kcs[2] = { 0 }; 692 void *lpc_mem; 693 int rc; 694 695 /* Test harness initialisation */ 696 lpc_mem = calloc(1, 1 * 1024 * 1024); 697 assert(lpc_mem); 698 699 /* BMC initialisation */ 700 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 701 &kcs, lpc_mem); 702 assert(!rc); 703 704 /* 0x5a isn't legal in v1 or v2 */ 705 kcs[MCTP_ASTLPC_KCS_REG_DATA] = 0x5a; 706 kcs[MCTP_ASTLPC_KCS_REG_STATUS] |= KCS_STATUS_IBF; 707 708 /* Check for a command despite none present */ 709 rc = mctp_astlpc_poll(bmc.astlpc); 710 711 /* Make sure it doesn't fail, bad command should be discarded */ 712 assert(rc == 0); 713 714 endpoint_destroy(&bmc); 715 free(lpc_mem); 716 } 717 718 #define BUFFER_MIN ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(MCTP_BTU)) 719 720 static void astlpc_test_buffers_rx_offset_overflow(void) 721 { 722 struct mctp_astlpc_layout l = { 723 .rx = { UINT32_MAX, BUFFER_MIN }, 724 .tx = { control_size, BUFFER_MIN }, 725 }; 726 727 assert(!mctp_astlpc_layout_validate(&l)); 728 } 729 730 static void astlpc_test_buffers_tx_offset_overflow(void) 731 { 732 struct mctp_astlpc_layout l = { 733 .rx = { control_size, BUFFER_MIN }, 734 .tx = { UINT32_MAX, BUFFER_MIN }, 735 }; 736 737 assert(!mctp_astlpc_layout_validate(&l)); 738 } 739 740 static void astlpc_test_buffers_rx_size_overflow(void) 741 { 742 struct mctp_astlpc_layout l = { 743 .rx = { control_size + BUFFER_MIN, UINT32_MAX }, 744 .tx = { control_size, BUFFER_MIN }, 745 }; 746 747 assert(!mctp_astlpc_layout_validate(&l)); 748 } 749 750 static void astlpc_test_buffers_tx_size_overflow(void) 751 { 752 struct mctp_astlpc_layout l = { 753 .rx = { control_size, BUFFER_MIN }, 754 .tx = { control_size + BUFFER_MIN, UINT32_MAX }, 755 }; 756 757 assert(!mctp_astlpc_layout_validate(&l)); 758 } 759 760 static void astlpc_test_buffers_rx_window_violation(void) 761 { 762 struct mctp_astlpc_layout l = { 763 .rx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN }, 764 .tx = { control_size, BUFFER_MIN }, 765 }; 766 767 assert(!mctp_astlpc_layout_validate(&l)); 768 } 769 770 static void astlpc_test_buffers_tx_window_violation(void) 771 { 772 struct mctp_astlpc_layout l = { 773 .rx = { control_size, BUFFER_MIN }, 774 .tx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN }, 775 }; 776 777 assert(!mctp_astlpc_layout_validate(&l)); 778 } 779 780 static void astlpc_test_buffers_rx_size_fails_btu(void) 781 { 782 struct mctp_astlpc_layout l = { 783 .rx = { control_size, BUFFER_MIN - 1 }, 784 .tx = { control_size + BUFFER_MIN, BUFFER_MIN }, 785 }; 786 787 assert(!mctp_astlpc_layout_validate(&l)); 788 } 789 790 static void astlpc_test_buffers_tx_size_fails_btu(void) 791 { 792 struct mctp_astlpc_layout l = { 793 .rx = { control_size, BUFFER_MIN }, 794 .tx = { control_size + BUFFER_MIN, BUFFER_MIN - 1 }, 795 }; 796 797 assert(!mctp_astlpc_layout_validate(&l)); 798 } 799 800 static void astlpc_test_buffers_overlap_rx_low(void) 801 { 802 struct mctp_astlpc_layout l = { 803 .rx = { control_size, 2 * BUFFER_MIN }, 804 .tx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN }, 805 }; 806 807 assert(!mctp_astlpc_layout_validate(&l)); 808 } 809 810 static void astlpc_test_buffers_overlap_tx_low(void) 811 { 812 struct mctp_astlpc_layout l = { 813 .rx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN }, 814 .tx = { control_size, 2 * BUFFER_MIN }, 815 }; 816 817 assert(!mctp_astlpc_layout_validate(&l)); 818 } 819 820 static void astlpc_test_buffers_overlap_exact(void) 821 { 822 struct mctp_astlpc_layout l = { 823 .rx = { control_size, 2 * BUFFER_MIN }, 824 .tx = { control_size, 2 * BUFFER_MIN }, 825 }; 826 827 assert(!mctp_astlpc_layout_validate(&l)); 828 } 829 830 static void astlpc_test_buffers_overlap_control(void) 831 { 832 struct mctp_astlpc_layout l = { 833 .rx = { 0, BUFFER_MIN }, 834 .tx = { control_size + BUFFER_MIN, BUFFER_MIN }, 835 }; 836 837 assert(!mctp_astlpc_layout_validate(&l)); 838 } 839 840 static void astlpc_test_buffers_bad_host_proposal(void) 841 { 842 struct astlpc_endpoint bmc, host; 843 struct mctp_lpcmap_hdr *hdr; 844 uint8_t kcs[2] = { 0 }; 845 void *lpc_mem; 846 int rc; 847 848 /* Test harness initialisation */ 849 lpc_mem = calloc(1, 1 * 1024 * 1024); 850 assert(lpc_mem); 851 852 /* BMC initialisation */ 853 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 854 &kcs, lpc_mem); 855 assert(!rc); 856 857 /* Host initialisation */ 858 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 859 &kcs, lpc_mem); 860 assert(!rc); 861 862 /* 863 * Now that the host has initialised the control area, break 864 * something before polling the BMC 865 */ 866 hdr = lpc_mem; 867 hdr->layout.rx_size = 0; 868 869 mctp_astlpc_poll(bmc.astlpc); 870 871 /* Make sure the BMC has not set the channel to active */ 872 assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE)); 873 874 endpoint_destroy(&host); 875 endpoint_destroy(&bmc); 876 free(lpc_mem); 877 } 878 879 static void astlpc_test_buffers_bad_bmc_proposal(void) 880 { 881 struct astlpc_endpoint bmc, host; 882 struct mctp_lpcmap_hdr *hdr; 883 uint8_t kcs[2] = { 0 }; 884 void *lpc_mem; 885 int rc; 886 887 /* Test harness initialisation */ 888 lpc_mem = calloc(1, 1 * 1024 * 1024); 889 assert(lpc_mem); 890 891 /* BMC initialisation */ 892 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 893 &kcs, lpc_mem); 894 assert(!rc); 895 896 /* 897 * Now that the BMC has initialised the control area, break something 898 * before initialising the host 899 */ 900 hdr = lpc_mem; 901 hdr->layout.rx_size = 0; 902 903 /* Host initialisation: Fails due to bad layout */ 904 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 905 &kcs, lpc_mem); 906 assert(rc < 0); 907 908 endpoint_destroy(&host); 909 endpoint_destroy(&bmc); 910 free(lpc_mem); 911 } 912 913 static void astlpc_test_buffers_bad_bmc_negotiation(void) 914 { 915 struct astlpc_endpoint bmc, host; 916 struct mctp_lpcmap_hdr *hdr; 917 uint8_t kcs[2] = { 0 }; 918 void *lpc_mem; 919 int rc; 920 921 /* Test harness initialisation */ 922 lpc_mem = calloc(1, 1 * 1024 * 1024); 923 assert(lpc_mem); 924 925 /* BMC initialisation */ 926 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 927 &kcs, lpc_mem); 928 assert(!rc); 929 930 /* Host initialisation */ 931 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 932 &kcs, lpc_mem); 933 assert(!rc); 934 935 mctp_astlpc_poll(bmc.astlpc); 936 937 /* 938 * Now that the BMC has initialised the control area, break something 939 * before polling the host 940 */ 941 hdr = lpc_mem; 942 hdr->layout.rx_size = 0; 943 944 rc = mctp_astlpc_poll(host.astlpc); 945 assert(rc < 0); 946 947 endpoint_destroy(&host); 948 endpoint_destroy(&bmc); 949 free(lpc_mem); 950 } 951 952 static void astlpc_test_buffers_bad_host_init(void) 953 { 954 struct astlpc_endpoint host; 955 uint8_t kcs[2] = { 0 }; 956 void *lpc_mem; 957 int rc; 958 959 /* Test harness initialisation */ 960 lpc_mem = calloc(1, 1 * 1024 * 1024); 961 assert(lpc_mem); 962 963 host.mctp = mctp_init(); 964 assert(host.mctp); 965 host.mmio.kcs = &kcs; 966 host.mmio.bmc = false; 967 968 /* Set the MTU to 0 to provoke a failure */ 969 host.astlpc = 970 mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, 0, lpc_mem, 971 &astlpc_direct_mmio_ops, &host.mmio); 972 973 rc = mctp_register_bus(host.mctp, &host.astlpc->binding, 8); 974 assert(rc < 0); 975 976 mctp_astlpc_destroy(host.astlpc); 977 mctp_destroy(host.mctp); 978 free(lpc_mem); 979 } 980 981 static void astlpc_test_negotiate_increased_mtu(void) 982 { 983 struct astlpc_endpoint bmc, host; 984 uint8_t kcs[2] = { 0 }; 985 void *lpc_mem; 986 int rc; 987 988 /* Test harness initialisation */ 989 lpc_mem = calloc(1, 1 * 1024 * 1024); 990 assert(lpc_mem); 991 992 /* BMC initialisation */ 993 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU, 994 &kcs, lpc_mem); 995 assert(!rc); 996 997 /* Host initialisation */ 998 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 999 2 * MCTP_BTU, &kcs, lpc_mem); 1000 assert(!rc); 1001 1002 rc = mctp_astlpc_poll(bmc.astlpc); 1003 assert(rc == 0); 1004 1005 rc = mctp_astlpc_poll(host.astlpc); 1006 assert(rc == 0); 1007 1008 endpoint_destroy(&host); 1009 endpoint_destroy(&bmc); 1010 free(lpc_mem); 1011 } 1012 1013 static void astlpc_test_negotiate_mtu_low_high(void) 1014 { 1015 struct astlpc_endpoint bmc, host; 1016 uint8_t kcs[2] = { 0 }; 1017 void *lpc_mem; 1018 int rc; 1019 1020 /* Test harness initialisation */ 1021 lpc_mem = calloc(1, 1 * 1024 * 1024); 1022 assert(lpc_mem); 1023 1024 /* BMC initialisation */ 1025 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU, 1026 &kcs, lpc_mem); 1027 assert(!rc); 1028 1029 /* Host initialisation with low MTU */ 1030 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 1031 2 * MCTP_BTU, &kcs, lpc_mem); 1032 assert(!rc); 1033 1034 /* Process low MTU proposal */ 1035 rc = mctp_astlpc_poll(bmc.astlpc); 1036 assert(rc == 0); 1037 1038 /* Accept low MTU proposal */ 1039 rc = mctp_astlpc_poll(host.astlpc); 1040 assert(rc == 0); 1041 1042 assert(host.astlpc->layout.rx.size == 1043 ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(2 * MCTP_BTU))); 1044 1045 /* Tear-down the host so we can bring up a new one */ 1046 endpoint_destroy(&host); 1047 1048 /* 1049 * Bring up a new host endpoint with a higher MTU than we previously 1050 * negotiated 1051 */ 1052 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 1053 3 * MCTP_BTU, &kcs, lpc_mem); 1054 assert(!rc); 1055 1056 /* Process high MTU proposal */ 1057 rc = mctp_astlpc_poll(bmc.astlpc); 1058 assert(rc == 0); 1059 1060 /* Accept high MTU proposal */ 1061 rc = mctp_astlpc_poll(host.astlpc); 1062 assert(rc == 0); 1063 1064 assert(host.astlpc->layout.rx.size == 1065 ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(3 * MCTP_BTU))); 1066 1067 endpoint_destroy(&host); 1068 endpoint_destroy(&bmc); 1069 free(lpc_mem); 1070 } 1071 1072 /* clang-format off */ 1073 #define TEST_CASE(test) { #test, test } 1074 static const struct { 1075 const char *name; 1076 void (*test)(void); 1077 } astlpc_tests[] = { 1078 TEST_CASE(astlpc_test_simple_init), 1079 TEST_CASE(astlpc_test_bad_version), 1080 TEST_CASE(astlpc_test_incompatible_versions), 1081 TEST_CASE(astlpc_test_choose_bmc_ver_cur), 1082 TEST_CASE(astlpc_test_choose_host_ver_cur), 1083 TEST_CASE(astlpc_test_version_host_fails_negotiation), 1084 TEST_CASE(astlpc_test_version_bmc_fails_negotiation), 1085 TEST_CASE(astlpc_test_host_before_bmc), 1086 TEST_CASE(astlpc_test_simple_message_bmc_to_host), 1087 TEST_CASE(astlpc_test_simple_message_host_to_bmc), 1088 TEST_CASE(astlpc_test_packetised_message_bmc_to_host), 1089 TEST_CASE(astlpc_test_simple_indirect_message_bmc_to_host), 1090 TEST_CASE(astlpc_test_host_tx_bmc_gone), 1091 TEST_CASE(astlpc_test_poll_not_ready), 1092 TEST_CASE(astlpc_test_undefined_command), 1093 TEST_CASE(astlpc_test_buffers_rx_offset_overflow), 1094 TEST_CASE(astlpc_test_buffers_tx_offset_overflow), 1095 TEST_CASE(astlpc_test_buffers_rx_size_overflow), 1096 TEST_CASE(astlpc_test_buffers_tx_size_overflow), 1097 TEST_CASE(astlpc_test_buffers_rx_window_violation), 1098 TEST_CASE(astlpc_test_buffers_tx_window_violation), 1099 TEST_CASE(astlpc_test_buffers_rx_size_fails_btu), 1100 TEST_CASE(astlpc_test_buffers_tx_size_fails_btu), 1101 TEST_CASE(astlpc_test_buffers_overlap_rx_low), 1102 TEST_CASE(astlpc_test_buffers_overlap_tx_low), 1103 TEST_CASE(astlpc_test_buffers_bad_host_proposal), 1104 TEST_CASE(astlpc_test_buffers_bad_bmc_proposal), 1105 TEST_CASE(astlpc_test_buffers_bad_bmc_negotiation), 1106 TEST_CASE(astlpc_test_buffers_overlap_exact), 1107 TEST_CASE(astlpc_test_buffers_overlap_control), 1108 TEST_CASE(astlpc_test_buffers_bad_host_init), 1109 TEST_CASE(astlpc_test_negotiate_increased_mtu), 1110 TEST_CASE(astlpc_test_negotiate_mtu_low_high), 1111 }; 1112 /* clang-format on */ 1113 1114 #ifndef BUILD_ASSERT 1115 #define BUILD_ASSERT(x) \ 1116 do { \ 1117 (void)sizeof(char[0 - (!(x))]); \ 1118 } while (0) 1119 #endif 1120 1121 int main(void) 1122 { 1123 size_t i; 1124 1125 mctp_set_log_stdio(MCTP_LOG_DEBUG); 1126 1127 BUILD_ASSERT(ARRAY_SIZE(astlpc_tests) < SIZE_MAX); 1128 for (i = 0; i < ARRAY_SIZE(astlpc_tests); i++) { 1129 mctp_prlog(MCTP_LOG_DEBUG, "begin: %s", astlpc_tests[i].name); 1130 astlpc_tests[i].test(); 1131 mctp_prlog(MCTP_LOG_DEBUG, "end: %s\n", astlpc_tests[i].name); 1132 } 1133 1134 return 0; 1135 } 1136