/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#define ASTLPC_VER_CUR 3
#include "astlpc.c"

#ifdef pr_fmt
#undef pr_fmt
#define pr_fmt(x) "test: " x
#endif

#include "compiler.h"
#include "container_of.h"
#include "libmctp-astlpc.h"
#include "libmctp-log.h"

#ifdef NDEBUG
#undef NDEBUG
#endif

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/random.h>
#include <unistd.h>

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif

struct mctp_binding_astlpc_mmio {
	struct mctp_binding_astlpc astlpc;
	bool bmc;

	uint8_t (*kcs)[2];

	size_t lpc_size;
	uint8_t *lpc;
};

struct astlpc_endpoint {
	struct mctp_binding_astlpc_mmio mmio;
	struct mctp_binding_astlpc *astlpc;
	struct mctp *mctp;
};

struct astlpc_test {
	struct astlpc_endpoint bmc;
	struct astlpc_endpoint host;
	uint8_t kcs[2];
	uint8_t *lpc_mem;

	void *msg;
	uint8_t count;
};

#define binding_to_mmio(b) \
	container_of(b, struct mctp_binding_astlpc_mmio, astlpc)

static int mctp_astlpc_mmio_kcs_read(void *data,
				     enum mctp_binding_astlpc_kcs_reg reg,
				     uint8_t *val)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

	*val = (*mmio->kcs)[reg];

	mctp_prdebug("%s: 0x%hhx from %s", __func__, *val,
		     reg ? "status" : "data");

	if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
		uint8_t flag = mmio->bmc ? KCS_STATUS_IBF : KCS_STATUS_OBF;
		(*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] &= ~flag;
	}

	return 0;
}

static int mctp_astlpc_mmio_kcs_write(void *data,
				      enum mctp_binding_astlpc_kcs_reg reg,
				      uint8_t val)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);
	uint8_t *regp;

	assert(reg == MCTP_ASTLPC_KCS_REG_DATA ||
	       reg == MCTP_ASTLPC_KCS_REG_STATUS);

	if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
		uint8_t flag = mmio->bmc ? KCS_STATUS_OBF : KCS_STATUS_IBF;
		(*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] |= flag;
	}

	regp = &(*mmio->kcs)[reg];
	if (reg == MCTP_ASTLPC_KCS_REG_STATUS)
		*regp = (val & ~0xbU) | (val & *regp & 1);
	else
		*regp = val;

	mctp_prdebug("%s: 0x%hhx to %s", __func__, val,
		     reg ? "status" : "data");

	return 0;
}

/*
 * The "direct" ops provide only the KCS register accessors; the binding
 * accesses the LPC window through the pointer handed to mctp_astlpc_init().
 * The "indirect" ops further below add lpc_read()/lpc_write() callbacks so
 * the window can be exercised without a direct mapping (see
 * astlpc_test_simple_indirect_message_bmc_to_host()).
 */
static const struct mctp_binding_astlpc_ops astlpc_direct_mmio_ops = {
	.kcs_read = mctp_astlpc_mmio_kcs_read,
	.kcs_write = mctp_astlpc_mmio_kcs_write,
};

int mctp_astlpc_mmio_lpc_read(void *data, void *buf, long offset, size_t len)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

	mctp_prdebug("%s: %zu bytes from 0x%lx", __func__, len, offset);

	assert(offset >= 0L);
	assert(offset + len < mmio->lpc_size);

	memcpy(buf, mmio->lpc + offset, len);

	return 0;
}

int mctp_astlpc_mmio_lpc_write(void *data, const void *buf, long offset,
			       size_t len)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

	mctp_prdebug("%s: %zu bytes to 0x%lx", __func__, len, offset);

	assert(offset >= 0L);
	assert(offset + len < mmio->lpc_size);

	memcpy(mmio->lpc + offset, buf, len);

	return 0;
}

static const struct mctp_binding_astlpc_ops astlpc_indirect_mmio_ops = {
	.kcs_read = mctp_astlpc_mmio_kcs_read,
	.kcs_write = mctp_astlpc_mmio_kcs_write,
	.lpc_read = mctp_astlpc_mmio_lpc_read,
	.lpc_write = mctp_astlpc_mmio_lpc_write,
};

static void rx_message(uint8_t eid __unused, void *data __unused, void *msg,
		       size_t len)
{
	struct astlpc_test *test = data;

	mctp_prdebug("MCTP message received: msg: %p, len %zd", msg, len);

	assert(len > 0);
	assert(msg);
	assert(test);
	assert(test->msg);
	assert(!memcmp(test->msg, msg, len));

	test->count++;
}

static int endpoint_init(struct astlpc_endpoint *ep, mctp_eid_t eid,
			 uint8_t mode, uint32_t mtu, uint8_t (*kcs)[2],
			 void *lpc_mem)
{
	/*
	 * Configure the direction of the KCS interface so we know whether to
	 * set or clear IBF or OBF on writes or reads.
	 */
	ep->mmio.bmc = (mode == MCTP_BINDING_ASTLPC_MODE_BMC);

	ep->mctp = mctp_init();
	assert(ep->mctp);

	/* Inject KCS registers */
	ep->mmio.kcs = kcs;

	/* Initialise the binding */
	ep->astlpc = mctp_astlpc_init(mode, mtu, lpc_mem,
				      &astlpc_direct_mmio_ops, &ep->mmio);
	assert(ep->astlpc);

	return mctp_register_bus(ep->mctp, &ep->astlpc->binding, eid);
}

static void endpoint_destroy(struct astlpc_endpoint *ep)
{
	mctp_astlpc_destroy(ep->astlpc);
	mctp_destroy(ep->mctp);
}
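
/*
 * network_init() brings up a BMC/host pair that shares one KCS register file
 * and one LPC window, and walks the binding start-up handshake. The raw KCS
 * data bytes asserted by the tests follow the sequence exercised here: the
 * host writes 0x00 to request channel init, the BMC answers with 0xff once
 * the channel is active, 0x01 flags a packet ready in the sender's Tx area,
 * and 0x02 hands buffer ownership back to the sender.
 */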
static void network_init(struct astlpc_test *ctx)
{
	int rc;

	ctx->lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(ctx->lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&ctx->bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &ctx->kcs, ctx->lpc_mem);
	assert(!rc);
	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);

	/* Host initialisation */
	rc = endpoint_init(&ctx->host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
			   MCTP_BTU, &ctx->kcs, ctx->lpc_mem);
	assert(!rc);

	/* BMC processes host channel init request, alerts host */
	mctp_astlpc_poll(ctx->bmc.astlpc);
	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] &
	       KCS_STATUS_CHANNEL_ACTIVE);
	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);

	/* Host dequeues channel init result */
	mctp_astlpc_poll(ctx->host.astlpc);
}

static void network_destroy(struct astlpc_test *ctx)
{
	endpoint_destroy(&ctx->bmc);
	endpoint_destroy(&ctx->host);
	free(ctx->lpc_mem);
}

static void astlpc_assert_tx_packet(struct astlpc_endpoint *src,
				    const void *expected, size_t len)
{
	const size_t tx_body = src->astlpc->layout.tx.offset + 4 + 4;
	const void *test = ((char *)src->astlpc->lpc_map) + tx_body;
	assert(!memcmp(test, expected, len));
}

static void astlpc_test_packetised_message_bmc_to_host(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t msg[2 * MCTP_BTU];
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, MCTP_BTU);
	memset(&msg[MCTP_BTU], 0xa5, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends a message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg));
	assert(rc == 0);

	/* Host receives the first packet */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* BMC dequeues ownership hand-over and sends the queued packet */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	/* Verify the BMC has sent the next packet */
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.bmc, &msg[MCTP_BTU], MCTP_BTU);

	/* Host receives final packet */
	mctp_astlpc_poll(ctx.host.astlpc);
	assert(ctx.count == 1);

	network_destroy(&ctx);
}

static void astlpc_test_simple_message_host_to_bmc(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t msg[MCTP_BTU];
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0xa5, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

	/* Host sends the single-packet message */
	rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU);

	/* BMC receives the single-packet message */
	mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(ctx.count == 1);

	/* BMC returns Tx area ownership to Host */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);

	/* Host dequeues ownership hand-over */
	rc = mctp_astlpc_poll(ctx.host.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

static void astlpc_test_simple_message_bmc_to_host(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t msg[MCTP_BTU];
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends the single-packet message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU);

	/* Host receives the single-packet message */
	mctp_astlpc_poll(ctx.host.astlpc);
	assert(ctx.count == 1);

	/* Host returns Rx area ownership to BMC */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);

	/* BMC dequeues ownership hand-over */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

static void astlpc_test_host_before_bmc(void)
{
	struct mctp_binding_astlpc_mmio mmio = { 0 };
	struct mctp_binding_astlpc *astlpc;
	uint8_t kcs[2] = { 0 };
	struct mctp *mctp;
	int rc;

	mctp = mctp_init();
	assert(mctp);

	/* Inject KCS registers */
	mmio.kcs = &kcs;

	/* Initialise the binding */
	astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
				  &astlpc_direct_mmio_ops, &mmio);

	/* Register the binding to trigger the start-up sequence */
	rc = mctp_register_bus(mctp, &astlpc->binding, 8);

	/* Start-up should fail as we haven't initialised the BMC */
	assert(rc < 0);

	mctp_astlpc_destroy(astlpc);
	mctp_destroy(mctp);
}
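
/*
 * mctp_astlpc_negotiate_version() takes the BMC's minimum and current
 * versions followed by the host's, and returns the negotiated version, or 0
 * when either range is invalid or the two ranges don't intersect, as
 * exercised by the tests below.
 */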
static void astlpc_test_bad_version(void)
{
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_BAD, ASTLPC_VER_CUR,
					     ASTLPC_VER_MIN, ASTLPC_VER_CUR));
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_BAD,
					     ASTLPC_VER_MIN, ASTLPC_VER_CUR));
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
					     ASTLPC_VER_BAD, ASTLPC_VER_CUR));
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
					     ASTLPC_VER_MIN, ASTLPC_VER_BAD));
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR, ASTLPC_VER_MIN,
			    ASTLPC_VER_CUR + 1));
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_MIN, ASTLPC_VER_CUR + 1,
			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR));
}

static void astlpc_test_incompatible_versions(void)
{
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_CUR, ASTLPC_VER_CUR, ASTLPC_VER_CUR + 1,
			    ASTLPC_VER_CUR + 1));
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR + 1,
			    ASTLPC_VER_CUR, ASTLPC_VER_CUR));
}

static void astlpc_test_choose_bmc_ver_cur(void)
{
	assert(2 == mctp_astlpc_negotiate_version(1, 2, 2, 3));
}

static void astlpc_test_choose_host_ver_cur(void)
{
	assert(2 == mctp_astlpc_negotiate_version(2, 3, 1, 2));
}

static void astlpc_test_version_host_fails_negotiation(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Now the BMC is initialised, break its version announcement */
	hdr = lpc_mem;
	hdr->bmc_ver_cur = ASTLPC_VER_BAD;

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(rc < 0);

	endpoint_destroy(&bmc);
	endpoint_destroy(&host);
	free(lpc_mem);
}

static void astlpc_test_version_bmc_fails_negotiation(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Now the host is initialised, break its version announcement */
	hdr = lpc_mem;
	hdr->host_ver_cur = ASTLPC_VER_BAD;

	/* Poll the BMC to detect the broken host version */
	mctp_astlpc_poll(bmc.astlpc);
	assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));

	/* Poll the host so it detects failed negotiation */
	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc < 0);

	endpoint_destroy(&bmc);
	endpoint_destroy(&host);
	free(lpc_mem);
}

static void astlpc_test_simple_init(void)
{
	struct astlpc_endpoint bmc, host;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Verify the BMC binding was initialised */
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host sends channel init command */
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
	assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x00);

	/* BMC receives host channel init request */
	mctp_astlpc_poll(bmc.astlpc);

	/* BMC sends init response */
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE);
	assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);

	/* Host dequeues data */
	mctp_astlpc_poll(host.astlpc);

	endpoint_destroy(&bmc);
	endpoint_destroy(&host);
	free(lpc_mem);
}

static void astlpc_test_simple_indirect_message_bmc_to_host(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t kcs[2] = { 0 };
	uint8_t msg[MCTP_BTU];
	int rc;

	ctx.lpc_mem = calloc(1, LPC_WIN_SIZE);
	assert(ctx.lpc_mem);

	/* Test message data */
	memset(&msg[0], 0x5a, MCTP_BTU);

	/* Manually set up the network so we can inject the indirect ops */

	/* BMC initialisation */
	ctx.bmc.mmio.bmc = true;
	ctx.bmc.mctp = mctp_init();
	assert(ctx.bmc.mctp);
	ctx.bmc.mmio.kcs = &kcs;
	ctx.bmc.mmio.lpc = ctx.lpc_mem;
	ctx.bmc.mmio.lpc_size = LPC_WIN_SIZE;
	ctx.bmc.astlpc =
		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, NULL,
				 &astlpc_indirect_mmio_ops, &ctx.bmc.mmio);
	mctp_register_bus(ctx.bmc.mctp, &ctx.bmc.astlpc->binding, 8);

	/* Host initialisation */
	ctx.host.mmio.bmc = false;
	ctx.host.mctp = mctp_init();
	assert(ctx.host.mctp);
	ctx.host.mmio.kcs = &kcs;
	ctx.host.mmio.lpc = ctx.lpc_mem;
	ctx.host.mmio.lpc_size = LPC_WIN_SIZE;
	ctx.host.astlpc =
		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
				 &astlpc_indirect_mmio_ops, &ctx.host.mmio);
	mctp_register_bus(ctx.host.mctp, &ctx.host.astlpc->binding, 9);

	/* BMC processes host channel init request, alerts host */
	mctp_astlpc_poll(ctx.bmc.astlpc);

	/* Host dequeues channel init result */
	mctp_astlpc_poll(ctx.host.astlpc);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends the single-packet message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg));
	assert(rc == 0);

	/* Host receives the single-packet message */
	rc = mctp_astlpc_poll(ctx.host.astlpc);
	assert(rc == 0);
	assert(ctx.count == 1);

	/* BMC dequeues ownership hand-over */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	/* Can still tear down the network in the normal fashion */
	network_destroy(&ctx);
}

static void astlpc_test_host_tx_bmc_gone(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t unwritten[MCTP_BTU];
	uint8_t msg[MCTP_BTU];
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, sizeof(msg));
	memset(&unwritten[0], 0, sizeof(unwritten));

	ctx.msg = &msg[0];
	ctx.count = 0;

	/* Clear bmc-ready */
	endpoint_destroy(&ctx.bmc);

	/* Host detects that the BMC is disabled */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* Host attempts to send the single-packet message, but is prevented */
	rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg));
	assert(rc == 0);
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
	astlpc_assert_tx_packet(&ctx.host, &unwritten[0], MCTP_BTU);

	/* BMC comes back */
	rc = endpoint_init(&ctx.bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &ctx.kcs, ctx.lpc_mem);
	assert(!rc);
	mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

	/* Host triggers channel init */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* BMC handles channel init */
	mctp_astlpc_poll(ctx.bmc.astlpc);

	/* Host completes channel init, flushing the Tx queue */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* BMC receives the single-packet message */
	mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(ctx.count == 1);

	network_destroy(&ctx);
}

static void astlpc_test_poll_not_ready(void)
{
	struct astlpc_endpoint bmc;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Check for a command despite none present */
	rc = mctp_astlpc_poll(bmc.astlpc);

	/* Make sure it doesn't fail */
	assert(rc == 0);

	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_undefined_command(void)
{
	struct astlpc_endpoint bmc;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* 0x5a isn't legal in v1 or v2 */
	kcs[MCTP_ASTLPC_KCS_REG_DATA] = 0x5a;
	kcs[MCTP_ASTLPC_KCS_REG_STATUS] |= KCS_STATUS_IBF;

	/* Poll the BMC to process the undefined command */
	rc = mctp_astlpc_poll(bmc.astlpc);

	/* Make sure it doesn't fail; the bad command should be discarded */
	assert(rc == 0);

	endpoint_destroy(&bmc);
	free(lpc_mem);
}
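
/*
 * Fixtures for the layout-validation tests below. BUFFER_MIN is the smallest
 * packet buffer the checks should accept: one baseline-MTU packet plus eight
 * bytes of framing (assumed here to be the 4-byte length prefix and 4-byte
 * CRC-32 used by protocol version 3). astlpc_layout_ctx borrows the version 3
 * protocol descriptor so mctp_astlpc_layout_validate() can be called without
 * bringing up a full binding.
 */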
#define BUFFER_MIN (MCTP_PACKET_SIZE(MCTP_BTU) + 4 + 4)

static const struct mctp_binding_astlpc astlpc_layout_ctx = {
	.proto = &astlpc_protocol_version[3],
};

static void astlpc_test_buffers_rx_offset_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { UINT32_MAX, BUFFER_MIN },
		.tx = { control_size, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_offset_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { UINT32_MAX, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_size_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size + BUFFER_MIN, UINT32_MAX },
		.tx = { control_size, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_size_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, UINT32_MAX },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_window_violation(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
		.tx = { control_size, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_window_violation(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_size_fails_btu(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN - 1 },
		.tx = { control_size + BUFFER_MIN, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_size_fails_btu(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, BUFFER_MIN - 1 },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_rx_low(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, 2 * BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_tx_low(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
		.tx = { control_size, 2 * BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_exact(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, 2 * BUFFER_MIN },
		.tx = { control_size, 2 * BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_control(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { 0, BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_bad_host_proposal(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/*
	 * Now that the host has initialised the control area, break
	 * something before polling the BMC
	 */
	hdr = lpc_mem;
	hdr->layout.rx_size = 0;

	mctp_astlpc_poll(bmc.astlpc);

	/* Make sure the BMC has not set the channel to active */
	assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_buffers_bad_bmc_proposal(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/*
	 * Now that the BMC has initialised the control area, break something
	 * before initialising the host
	 */
	hdr = lpc_mem;
	hdr->layout.rx_size = 0;

	/* Host initialisation: Fails due to bad layout */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(rc < 0);

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_buffers_bad_bmc_negotiation(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	mctp_astlpc_poll(bmc.astlpc);

	/*
	 * Now that the BMC has initialised the control area, break something
	 * before polling the host
	 */
	hdr = lpc_mem;
	hdr->layout.rx_size = 0;

	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc < 0);

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_buffers_bad_host_init(void)
{
	struct astlpc_endpoint host;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	host.mctp = mctp_init();
	assert(host.mctp);
	host.mmio.kcs = &kcs;
	host.mmio.bmc = false;

	/* Set the MTU to 0 to provoke a failure */
	host.astlpc =
		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, 0, lpc_mem,
				 &astlpc_direct_mmio_ops, &host.mmio);

	rc = mctp_register_bus(host.mctp, &host.astlpc->binding, 8);
	assert(rc < 0);

	mctp_astlpc_destroy(host.astlpc);
	mctp_destroy(host.mctp);
	free(lpc_mem);
}

static void astlpc_test_negotiate_increased_mtu(void)
{
	struct astlpc_endpoint bmc, host;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
			   2 * MCTP_BTU, &kcs, lpc_mem);
	assert(!rc);

	rc = mctp_astlpc_poll(bmc.astlpc);
	assert(rc == 0);

	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc == 0);

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_negotiate_mtu_low_high(void)
{
	struct astlpc_endpoint bmc, host;
	uint8_t kcs[2] = { 0 };
	uint32_t bmtu, hmtu;
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	bmtu = 3 * MCTP_BTU;
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, bmtu, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Host initialisation with low MTU */
	hmtu = 2 * MCTP_BTU;
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Process low MTU proposal */
	rc = mctp_astlpc_poll(bmc.astlpc);
	assert(rc == 0);

	/* Accept low MTU proposal */
	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc == 0);

	assert(host.astlpc->layout.rx.size ==
	       astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(hmtu)));

	/* Tear down the host so we can bring up a new one */
	endpoint_destroy(&host);

	/*
	 * Bring up a new host endpoint with a higher MTU than we previously
	 * negotiated
	 */
	hmtu = 3 * MCTP_BTU;
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Process high MTU proposal */
	rc = mctp_astlpc_poll(bmc.astlpc);
	assert(rc == 0);

	/* Accept high MTU proposal */
	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc == 0);

	assert(host.astlpc->layout.rx.size ==
	       astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(bmtu)));

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_send_large_packet(void)
{
	struct astlpc_endpoint *bmc, *host;
	struct astlpc_test ctx;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	host = &ctx.host;
	bmc = &ctx.bmc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 8192, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 8192, &kcs,
			   lpc_mem);
	assert(!rc);

	ctx.count = 0;
	mctp_set_rx_all(bmc->mctp, rx_message, &ctx);

	rc = mctp_astlpc_poll(bmc->astlpc);
	assert(rc == 0);

	rc = mctp_astlpc_poll(host->astlpc);
	assert(rc == 0);

	ctx.msg = malloc(2 * MCTP_BODY_SIZE(8192));
	assert(ctx.msg);

	memset(ctx.msg, 0x5a, 2 * MCTP_BODY_SIZE(8192));

	rc = mctp_message_tx(host->mctp, 8, ctx.msg, 2 * MCTP_BODY_SIZE(8192));
	assert(rc == 0);
	rc = mctp_astlpc_poll(bmc->astlpc);
	assert(rc == 0);
	rc = mctp_astlpc_poll(host->astlpc);
	assert(rc == 0);
	rc = mctp_astlpc_poll(bmc->astlpc);
	assert(rc == 0);
	rc = mctp_astlpc_poll(host->astlpc);
	assert(rc == 0);

	assert(ctx.count == 1);

	free(ctx.msg);
	endpoint_destroy(host);
	endpoint_destroy(bmc);
	free(lpc_mem);
}

static void astlpc_test_tx_before_channel_init(void)
{
	struct astlpc_endpoint *bmc;
	struct astlpc_test ctx;
	uint8_t kcs[2] = { 0 };
	uint8_t msg[MCTP_BTU];
	void *lpc_mem;
	int rc;

	bmc = &ctx.bmc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 0, &kcs,
			   lpc_mem);
	assert(!rc);

	memset(msg, '\0', sizeof(msg));

	/*
	 * There was once a bug where the calculated MTU was 0 and the
	 * packetisation loop in mctp_message_tx_on_bus() allocated all the
	 * memory. Catch the bug and avoid OOMing the test machine by
	 * terminating after a period long enough to packetise the message.
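	 * SIGALRM's default disposition terminates the process, so no signal
	 * handler is needed for the alarm to stop a runaway loop.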
	 */
	alarm(1);
	mctp_message_tx(bmc->mctp, 9, msg, sizeof(msg));
	alarm(0);

	endpoint_destroy(bmc);
	free(lpc_mem);
}

static void astlpc_test_corrupt_host_tx(void)
{
	struct astlpc_test ctx = { 0 };
	struct mctp_lpcmap_hdr *hdr;
	uint8_t msg[MCTP_BTU];
	uint32_t offset;
	uint32_t code;
	uint8_t *tlr;
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0xa5, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

	/* Host sends the single-packet message */
	rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU);

	/* Corrupt the CRC-32 in the message trailer */
	hdr = (struct mctp_lpcmap_hdr *)ctx.lpc_mem;
	offset = be32toh(hdr->layout.tx_offset);
	tlr = (uint8_t *)&ctx.lpc_mem[offset] + 4 + sizeof(msg);
	memcpy(&code, tlr, sizeof(code));
	code = ~code;
	memcpy(tlr, &code, sizeof(code));

	/* BMC drops the single-packet message */
	mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(ctx.count == 0);

	/* BMC returns Tx area ownership to Host */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);

	/* Host dequeues ownership hand-over */
	rc = mctp_astlpc_poll(ctx.host.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

static void astlpc_test_corrupt_bmc_tx(void)
{
	struct astlpc_test ctx = { 0 };
	struct mctp_lpcmap_hdr *hdr;
	uint8_t msg[MCTP_BTU];
	uint32_t offset;
	uint32_t code;
	uint8_t *tlr;
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends the single-packet message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	/* Check that the BMC sent a fully-formed packet */
	astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU);

	/* Corrupt the CRC-32 in the message trailer */
	hdr = (struct mctp_lpcmap_hdr *)ctx.lpc_mem;
	offset = be32toh(hdr->layout.rx_offset);
	tlr = (uint8_t *)&ctx.lpc_mem[offset] + 4 + sizeof(msg);
	memcpy(&code, tlr, sizeof(code));
	code = ~code;
	memcpy(tlr, &code, sizeof(code));

	/* Host drops the single-packet message */
	mctp_astlpc_poll(ctx.host.astlpc);
	assert(ctx.count == 0);

	/* Host returns Rx area ownership to BMC */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);

	/* BMC dequeues ownership hand-over */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

/* clang-format off */
#define TEST_CASE(test) { #test, test }
static const struct {
	const char *name;
	void (*test)(void);
} astlpc_tests[] = {
	TEST_CASE(astlpc_test_simple_init),
	TEST_CASE(astlpc_test_bad_version),
	TEST_CASE(astlpc_test_incompatible_versions),
	TEST_CASE(astlpc_test_choose_bmc_ver_cur),
	TEST_CASE(astlpc_test_choose_host_ver_cur),
	TEST_CASE(astlpc_test_version_host_fails_negotiation),
	TEST_CASE(astlpc_test_version_bmc_fails_negotiation),
	TEST_CASE(astlpc_test_host_before_bmc),
	TEST_CASE(astlpc_test_simple_message_bmc_to_host),
	TEST_CASE(astlpc_test_simple_message_host_to_bmc),
	TEST_CASE(astlpc_test_packetised_message_bmc_to_host),
	TEST_CASE(astlpc_test_simple_indirect_message_bmc_to_host),
	TEST_CASE(astlpc_test_host_tx_bmc_gone),
	TEST_CASE(astlpc_test_poll_not_ready),
	TEST_CASE(astlpc_test_undefined_command),
	TEST_CASE(astlpc_test_buffers_rx_offset_overflow),
	TEST_CASE(astlpc_test_buffers_tx_offset_overflow),
	TEST_CASE(astlpc_test_buffers_rx_size_overflow),
	TEST_CASE(astlpc_test_buffers_tx_size_overflow),
	TEST_CASE(astlpc_test_buffers_rx_window_violation),
	TEST_CASE(astlpc_test_buffers_tx_window_violation),
	TEST_CASE(astlpc_test_buffers_rx_size_fails_btu),
	TEST_CASE(astlpc_test_buffers_tx_size_fails_btu),
	TEST_CASE(astlpc_test_buffers_overlap_rx_low),
	TEST_CASE(astlpc_test_buffers_overlap_tx_low),
	TEST_CASE(astlpc_test_buffers_bad_host_proposal),
	TEST_CASE(astlpc_test_buffers_bad_bmc_proposal),
	TEST_CASE(astlpc_test_buffers_bad_bmc_negotiation),
	TEST_CASE(astlpc_test_buffers_overlap_exact),
	TEST_CASE(astlpc_test_buffers_overlap_control),
	TEST_CASE(astlpc_test_buffers_bad_host_init),
	TEST_CASE(astlpc_test_negotiate_increased_mtu),
	TEST_CASE(astlpc_test_negotiate_mtu_low_high),
	TEST_CASE(astlpc_test_send_large_packet),
	TEST_CASE(astlpc_test_tx_before_channel_init),
	TEST_CASE(astlpc_test_corrupt_host_tx),
	TEST_CASE(astlpc_test_corrupt_bmc_tx),
};
/* clang-format on */
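
/*
 * Fallback compile-time assertion: when the condition is false the array
 * type acquires a negative size, which fails the build.
 */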
#ifndef BUILD_ASSERT
#define BUILD_ASSERT(x) \
	do { \
		(void)sizeof(char[0 - (!(x))]); \
	} while (0)
#endif

int main(void)
{
	size_t i;

	mctp_set_log_stdio(MCTP_LOG_DEBUG);

	BUILD_ASSERT(ARRAY_SIZE(astlpc_tests) < SIZE_MAX);
	for (i = 0; i < ARRAY_SIZE(astlpc_tests); i++) {
		mctp_prlog(MCTP_LOG_DEBUG, "begin: %s", astlpc_tests[i].name);
		astlpc_tests[i].test();
		mctp_prlog(MCTP_LOG_DEBUG, "end: %s\n", astlpc_tests[i].name);
	}

	return 0;
}