/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#define ASTLPC_VER_CUR 3
#include "astlpc.c"

#ifdef pr_fmt
#undef pr_fmt
#define pr_fmt(x) "test: " x
#endif

#include "compiler.h"
#include "container_of.h"
#include "libmctp-astlpc.h"
#include "libmctp-log.h"

#ifdef NDEBUG
#undef NDEBUG
#endif

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/random.h>
#include <unistd.h>

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif

struct mctp_binding_astlpc_mmio {
	struct mctp_binding_astlpc astlpc;
	bool bmc;

	uint8_t (*kcs)[2];

	size_t lpc_size;
	uint8_t *lpc;
};

struct astlpc_endpoint {
	struct mctp_binding_astlpc_mmio mmio;
	struct mctp_binding_astlpc *astlpc;
	struct mctp *mctp;
};

struct astlpc_test {
	struct astlpc_endpoint bmc;
	struct astlpc_endpoint host;
	uint8_t kcs[2];
	uint8_t *lpc_mem;

	void *msg;
	uint8_t count;
};

#define binding_to_mmio(b) \
	container_of(b, struct mctp_binding_astlpc_mmio, astlpc)

static int mctp_astlpc_mmio_kcs_read(void *data,
				     enum mctp_binding_astlpc_kcs_reg reg,
				     uint8_t *val)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

	*val = (*mmio->kcs)[reg];

	mctp_prdebug("%s: 0x%hhx from %s", __func__, *val,
		     reg ? "status" : "data");

	if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
		uint8_t flag = mmio->bmc ? KCS_STATUS_IBF : KCS_STATUS_OBF;
		(*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] &= ~flag;
	}

	return 0;
}

/*
 * Note on the fake KCS device driven by these ops: index 0 of the two-byte
 * array is the data register and index 1 is the status register. Reading the
 * data register clears IBF when the reader is the BMC and OBF when it is the
 * host; writing the data register sets the opposite flag. Writes to the
 * status register mask off the handshake bits so a caller cannot set OBF or
 * IBF directly. This is a simplified model of the KCS handshake, sufficient
 * for the binding's producer/consumer protocol rather than a faithful
 * register-level emulation.
 */

static int mctp_astlpc_mmio_kcs_write(void *data,
				      enum mctp_binding_astlpc_kcs_reg reg,
				      uint8_t val)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);
	uint8_t *regp;

	assert(reg == MCTP_ASTLPC_KCS_REG_DATA ||
	       reg == MCTP_ASTLPC_KCS_REG_STATUS);

	if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
		uint8_t flag = mmio->bmc ? KCS_STATUS_OBF : KCS_STATUS_IBF;
		(*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] |= flag;
	}

	regp = &(*mmio->kcs)[reg];
	if (reg == MCTP_ASTLPC_KCS_REG_STATUS)
		*regp = (val & ~0xbU) | (val & *regp & 1);
	else
		*regp = val;

	mctp_prdebug("%s: 0x%hhx to %s", __func__, val,
		     reg ? "status" : "data");

	return 0;
}

static const struct mctp_binding_astlpc_ops astlpc_direct_mmio_ops = {
	.kcs_read = mctp_astlpc_mmio_kcs_read,
	.kcs_write = mctp_astlpc_mmio_kcs_write,
};

int mctp_astlpc_mmio_lpc_read(void *data, void *buf, long offset, size_t len)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

	mctp_prdebug("%s: %zu bytes from 0x%lx", __func__, len, offset);

	assert(offset >= 0L);
	assert(offset + len < mmio->lpc_size);

	memcpy(buf, mmio->lpc + offset, len);

	return 0;
}

int mctp_astlpc_mmio_lpc_write(void *data, const void *buf, long offset,
			       size_t len)
{
	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

	mctp_prdebug("%s: %zu bytes to 0x%lx", __func__, len, offset);

	assert(offset >= 0L);
	assert(offset + len < mmio->lpc_size);

	memcpy(mmio->lpc + offset, buf, len);

	return 0;
}

static const struct mctp_binding_astlpc_ops astlpc_indirect_mmio_ops = {
	.kcs_read = mctp_astlpc_mmio_kcs_read,
	.kcs_write = mctp_astlpc_mmio_kcs_write,
	.lpc_read = mctp_astlpc_mmio_lpc_read,
	.lpc_write = mctp_astlpc_mmio_lpc_write,
};

static void rx_message(uint8_t eid __unused, bool tag_owner __unused,
		       uint8_t msg_tag __unused, void *data __unused, void *msg,
		       size_t len)
{
	struct astlpc_test *test = data;

	mctp_prdebug("MCTP message received: msg: %p, len %zd", msg, len);

	assert(len > 0);
	assert(msg);
	assert(test);
	assert(test->msg);
	assert(!memcmp(test->msg, msg, len));

	test->count++;
}

static int endpoint_init(struct astlpc_endpoint *ep, mctp_eid_t eid,
			 uint8_t mode, uint32_t mtu, uint8_t (*kcs)[2],
			 void *lpc_mem)
{
	/*
	 * Configure the direction of the KCS interface so we know whether to
	 * set or clear IBF or OBF on writes or reads.
	 */
	ep->mmio.bmc = (mode == MCTP_BINDING_ASTLPC_MODE_BMC);

	ep->mctp = mctp_init();
	assert(ep->mctp);

	/* Inject KCS registers */
	ep->mmio.kcs = kcs;

	/* Initialise the binding */
	ep->astlpc = mctp_astlpc_init(mode, mtu, lpc_mem,
				      &astlpc_direct_mmio_ops, &ep->mmio);
	assert(ep->astlpc);

	return mctp_register_bus(ep->mctp, &ep->astlpc->binding, eid);
}

static void endpoint_destroy(struct astlpc_endpoint *ep)
{
	mctp_astlpc_destroy(ep->astlpc);
	mctp_destroy(ep->mctp);
}

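/*
 * network_init() wires a BMC endpoint and a host endpoint to the same KCS
 * register pair and LPC window, then walks the binding's start-up handshake:
 * the BMC raises KCS_STATUS_BMC_READY, the host writes the 0x00 channel-init
 * command, a BMC poll processes it, raises KCS_STATUS_CHANNEL_ACTIVE and
 * writes 0xff to alert the host, and a host poll consumes the alert. Judging
 * by the assertions in the tests below, the other data-register values follow
 * the same scheme: 0x01 announces a packet in the sender's Tx area and 0x02
 * hands buffer ownership back to the peer.
 */
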
static void network_init(struct astlpc_test *ctx)
{
	int rc;

	ctx->lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(ctx->lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&ctx->bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &ctx->kcs, ctx->lpc_mem);
	assert(!rc);
	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);

	/* Host initialisation */
	rc = endpoint_init(&ctx->host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
			   MCTP_BTU, &ctx->kcs, ctx->lpc_mem);
	assert(!rc);

	/* BMC processes host channel init request, alerts host */
	mctp_astlpc_poll(ctx->bmc.astlpc);
	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] &
	       KCS_STATUS_CHANNEL_ACTIVE);
	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);

	/* Host dequeues channel init result */
	mctp_astlpc_poll(ctx->host.astlpc);
}

static void network_destroy(struct astlpc_test *ctx)
{
	endpoint_destroy(&ctx->bmc);
	endpoint_destroy(&ctx->host);
	free(ctx->lpc_mem);
}

static void astlpc_assert_tx_packet(struct astlpc_endpoint *src,
				    const void *expected, size_t len)
{
	const size_t tx_body = src->astlpc->layout.tx.offset + 4 + 4;
	const void *test = ((char *)src->astlpc->lpc_map) + tx_body;
	assert(!memcmp(test, expected, len));
}

static void astlpc_test_packetised_message_bmc_to_host(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t msg[2 * MCTP_BTU];
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, MCTP_BTU);
	memset(&msg[MCTP_BTU], 0xa5, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends a message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, 0, msg,
			     sizeof(msg));
	assert(rc == 0);

	/* Host receives the first packet */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* BMC dequeues ownership hand-over and sends the queued packet */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	/* Host receives the next packet */
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.bmc, &msg[MCTP_BTU], MCTP_BTU);

	/* Host receives final packet */
	mctp_astlpc_poll(ctx.host.astlpc);
	assert(ctx.count == 1);

	network_destroy(&ctx);
}

static void astlpc_test_simple_message_host_to_bmc(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t msg[MCTP_BTU];
	uint8_t tag = 0;
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0xa5, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

	/* Host sends the single-packet message */
	rc = mctp_message_tx(ctx.host.mctp, 8, MCTP_MESSAGE_TO_DST, tag, msg,
			     sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU);

	/* BMC receives the single-packet message */
	mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(ctx.count == 1);

	/* BMC returns Tx area ownership to Host */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);

	/* Host dequeues ownership hand-over and sends the queued packet */
	rc = mctp_astlpc_poll(ctx.host.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

static void astlpc_test_simple_message_bmc_to_host(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t msg[MCTP_BTU];
	uint8_t tag = 0;
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends the single-packet message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
			     sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU);

	/* Host receives the single-packet message */
	mctp_astlpc_poll(ctx.host.astlpc);
	assert(ctx.count == 1);

	/* Host returns Rx area ownership to BMC */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);

	/* BMC dequeues ownership hand-over and sends the queued packet */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

static void astlpc_test_host_before_bmc(void)
{
	struct mctp_binding_astlpc_mmio mmio = { 0 };
	struct mctp_binding_astlpc *astlpc;
	uint8_t kcs[2] = { 0 };
	struct mctp *mctp;
	int rc;

	mctp = mctp_init();
	assert(mctp);

	/* Inject KCS registers */
	mmio.kcs = &kcs;

	/* Initialise the binding */
	astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
				  &astlpc_direct_mmio_ops, &mmio);

	/* Register the binding to trigger the start-up sequence */
	rc = mctp_register_bus(mctp, &astlpc->binding, 8);

	/* Start-up should fail as we haven't initialised the BMC */
	assert(rc < 0);

	mctp_astlpc_destroy(astlpc);
	mctp_destroy(mctp);
}

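/*
 * mctp_astlpc_negotiate_version(bmc_ver_min, bmc_ver_cur, host_ver_min,
 * host_ver_cur) is expected to return the highest protocol version that lies
 * within both advertised ranges, and 0 when a range is malformed or the two
 * ranges do not intersect, which is what the cases below exercise.
 */
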
static void astlpc_test_bad_version(void)
{
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_BAD, ASTLPC_VER_CUR,
					     ASTLPC_VER_MIN, ASTLPC_VER_CUR));
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_BAD,
					     ASTLPC_VER_MIN, ASTLPC_VER_CUR));
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
					     ASTLPC_VER_BAD, ASTLPC_VER_CUR));
	assert(0 ==
	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
					     ASTLPC_VER_MIN, ASTLPC_VER_BAD));
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR, ASTLPC_VER_MIN,
			    ASTLPC_VER_CUR + 1));
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_MIN, ASTLPC_VER_CUR + 1,
			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR));
}

static void astlpc_test_incompatible_versions(void)
{
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_CUR, ASTLPC_VER_CUR, ASTLPC_VER_CUR + 1,
			    ASTLPC_VER_CUR + 1));
	assert(0 == mctp_astlpc_negotiate_version(
			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR + 1,
			    ASTLPC_VER_CUR, ASTLPC_VER_CUR));
}

static void astlpc_test_choose_bmc_ver_cur(void)
{
	assert(2 == mctp_astlpc_negotiate_version(1, 2, 2, 3));
}

static void astlpc_test_choose_host_ver_cur(void)
{
	assert(2 == mctp_astlpc_negotiate_version(2, 3, 1, 2));
}

static void astlpc_test_version_host_fails_negotiation(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Now the BMC is initialised, break its version announcement */
	hdr = lpc_mem;
	hdr->bmc_ver_cur = ASTLPC_VER_BAD;

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(rc < 0);

	endpoint_destroy(&bmc);
	endpoint_destroy(&host);
	free(lpc_mem);
}

static void astlpc_test_version_bmc_fails_negotiation(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Now the host is initialised, break its version announcement */
	hdr = lpc_mem;
	hdr->host_ver_cur = ASTLPC_VER_BAD;

	/* Poll the BMC to detect the broken host version */
	mctp_astlpc_poll(bmc.astlpc);
	assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));

	/* Poll the host so it detects failed negotiation */
	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc < 0);

	endpoint_destroy(&bmc);
	endpoint_destroy(&host);
	free(lpc_mem);
}

static void astlpc_test_simple_init(void)
{
	struct astlpc_endpoint bmc, host;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Verify the BMC binding was initialised */
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host sends channel init command */
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
	assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x00);

	/* BMC receives host channel init request */
	mctp_astlpc_poll(bmc.astlpc);

	/* BMC sends init response */
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE);
	assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);

	/* Host dequeues data */
	mctp_astlpc_poll(host.astlpc);

	endpoint_destroy(&bmc);
	endpoint_destroy(&host);
	free(lpc_mem);
}

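/*
 * The "indirect" variant below exercises the lpc_read()/lpc_write() callbacks:
 * no LPC mapping is handed to mctp_astlpc_init() (lpc_mem is NULL), so the
 * binding must access the window through astlpc_indirect_mmio_ops rather than
 * by direct memory access as in the tests above.
 */
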
static void astlpc_test_simple_indirect_message_bmc_to_host(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t kcs[2] = { 0 };
	uint8_t msg[MCTP_BTU];
	uint8_t tag = 0;
	int rc;

	ctx.lpc_mem = calloc(1, LPC_WIN_SIZE);
	assert(ctx.lpc_mem);

	/* Test message data */
	memset(&msg[0], 0x5a, MCTP_BTU);

	/* Manually set up the network so we can inject the indirect ops */

	/* BMC initialisation */
	ctx.bmc.mmio.bmc = true;
	ctx.bmc.mctp = mctp_init();
	assert(ctx.bmc.mctp);
	ctx.bmc.mmio.kcs = &kcs;
	ctx.bmc.mmio.lpc = ctx.lpc_mem;
	ctx.bmc.mmio.lpc_size = LPC_WIN_SIZE;
	ctx.bmc.astlpc =
		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, NULL,
				 &astlpc_indirect_mmio_ops, &ctx.bmc.mmio);
	mctp_register_bus(ctx.bmc.mctp, &ctx.bmc.astlpc->binding, 8);

	/* Host initialisation */
	ctx.host.mmio.bmc = false;
	ctx.host.mctp = mctp_init();
	assert(ctx.host.mctp);
	ctx.host.mmio.kcs = &kcs;
	ctx.host.mmio.lpc = ctx.lpc_mem;
	ctx.host.mmio.lpc_size = LPC_WIN_SIZE;
	ctx.host.astlpc =
		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
				 &astlpc_indirect_mmio_ops, &ctx.host.mmio);
	mctp_register_bus(ctx.host.mctp, &ctx.host.astlpc->binding, 9);

	/* BMC processes host channel init request, alerts host */
	mctp_astlpc_poll(ctx.bmc.astlpc);

	/* Host dequeues channel init result */
	mctp_astlpc_poll(ctx.host.astlpc);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends the single-packet message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
			     sizeof(msg));
	assert(rc == 0);

	/* Host receives the single-packet message */
	rc = mctp_astlpc_poll(ctx.host.astlpc);
	assert(rc == 0);
	assert(ctx.count == 1);

	/* BMC dequeues ownership hand-over and sends the queued packet */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	/* Can still tear-down the network in the normal fashion */
	network_destroy(&ctx);
}

static void astlpc_test_host_tx_bmc_gone(void)
{
	struct astlpc_test ctx = { 0 };
	uint8_t unwritten[MCTP_BTU];
	uint8_t msg[MCTP_BTU];
	uint8_t tag = 0;
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, sizeof(msg));
	memset(&unwritten[0], 0, sizeof(unwritten));

	ctx.msg = &msg[0];
	ctx.count = 0;

	/* Clear bmc-ready */
	endpoint_destroy(&ctx.bmc);

	/* Host detects that the BMC is disabled */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* Host attempts to send the single-packet message, but is prevented */
	rc = mctp_message_tx(ctx.host.mctp, 8, MCTP_MESSAGE_TO_DST, tag, msg,
			     sizeof(msg));
	assert(rc == 0);
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
	astlpc_assert_tx_packet(&ctx.host, &unwritten[0], MCTP_BTU);

	/* BMC comes back */
	rc = endpoint_init(&ctx.bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &ctx.kcs, ctx.lpc_mem);
	assert(!rc);
	mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

	/* Host triggers channel init */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* BMC handles channel init */
	mctp_astlpc_poll(ctx.bmc.astlpc);

	/* Host completes channel init, flushing the Tx queue */
	mctp_astlpc_poll(ctx.host.astlpc);

	/* BMC receives the single-packet message */
	mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(ctx.count == 1);

	network_destroy(&ctx);
}

static void astlpc_test_poll_not_ready(void)
{
	struct astlpc_endpoint bmc;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Check for a command despite none present */
	rc = mctp_astlpc_poll(bmc.astlpc);

	/* Make sure it doesn't fail */
	assert(rc == 0);

	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_undefined_command(void)
{
	struct astlpc_endpoint bmc;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* 0x5a isn't legal in v1 or v2 */
	kcs[MCTP_ASTLPC_KCS_REG_DATA] = 0x5a;
	kcs[MCTP_ASTLPC_KCS_REG_STATUS] |= KCS_STATUS_IBF;

	/* Process the undefined command */
	rc = mctp_astlpc_poll(bmc.astlpc);

	/* Make sure it doesn't fail, bad command should be discarded */
	assert(rc == 0);

	endpoint_destroy(&bmc);
	free(lpc_mem);
}

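/*
 * Layout validation tests. BUFFER_MIN is the smallest packet buffer these
 * tests treat as usable: a BTU-sized MCTP packet plus what appears to be the
 * per-packet framing added by protocol version 3 (hence the two extra words).
 * control_size comes from astlpc.c and is the size of the shared control area
 * at the base of the LPC window. mctp_astlpc_layout_validate() is expected to
 * reject layouts whose offsets or sizes overflow, fall outside LPC_WIN_SIZE,
 * undercut the BTU, or overlap the control area or each other.
 */
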
#define BUFFER_MIN (MCTP_PACKET_SIZE(MCTP_BTU) + 4 + 4)
static const struct mctp_binding_astlpc astlpc_layout_ctx = {
	.proto = &astlpc_protocol_version[3],
};

static void astlpc_test_buffers_rx_offset_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { UINT32_MAX, BUFFER_MIN },
		.tx = { control_size, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_offset_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { UINT32_MAX, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_size_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size + BUFFER_MIN, UINT32_MAX },
		.tx = { control_size, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_size_overflow(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, UINT32_MAX },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_window_violation(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
		.tx = { control_size, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_window_violation(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_size_fails_btu(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN - 1 },
		.tx = { control_size + BUFFER_MIN, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_size_fails_btu(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, BUFFER_MIN - 1 },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_rx_low(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, 2 * BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_tx_low(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
		.tx = { control_size, 2 * BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_exact(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { control_size, 2 * BUFFER_MIN },
		.tx = { control_size, 2 * BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_control(void)
{
	struct mctp_astlpc_layout l = {
		.rx = { 0, BUFFER_MIN },
		.tx = { control_size + BUFFER_MIN, BUFFER_MIN },
	};

	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

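/*
 * The following tests corrupt the buffer layout advertised through the shared
 * control area (struct mctp_lpcmap_hdr) at different points in the bring-up
 * sequence, and check that the peer refuses to activate the channel or fails
 * its poll rather than accepting the bogus layout.
 */
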
static void astlpc_test_buffers_bad_host_proposal(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/*
	 * Now that the host has initialised the control area, break
	 * something before polling the BMC
	 */
	hdr = lpc_mem;
	hdr->layout.rx_size = 0;

	mctp_astlpc_poll(bmc.astlpc);

	/* Make sure the BMC has not set the channel to active */
	assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_buffers_bad_bmc_proposal(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/*
	 * Now that the BMC has initialised the control area, break something
	 * before initialising the host
	 */
	hdr = lpc_mem;
	hdr->layout.rx_size = 0;

	/* Host initialisation: Fails due to bad layout */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(rc < 0);

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_buffers_bad_bmc_negotiation(void)
{
	struct astlpc_endpoint bmc, host;
	struct mctp_lpcmap_hdr *hdr;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	mctp_astlpc_poll(bmc.astlpc);

	/*
	 * Now that the BMC has initialised the control area, break something
	 * before polling the host
	 */
	hdr = lpc_mem;
	hdr->layout.rx_size = 0;

	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc < 0);

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_buffers_bad_host_init(void)
{
	struct astlpc_endpoint host;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	host.mctp = mctp_init();
	assert(host.mctp);
	host.mmio.kcs = &kcs;
	host.mmio.bmc = false;

	/* Set the MTU to 0 to provoke a failure */
	host.astlpc =
		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, 0, lpc_mem,
				 &astlpc_direct_mmio_ops, &host.mmio);

	rc = mctp_register_bus(host.mctp, &host.astlpc->binding, 8);
	assert(rc < 0);

	mctp_astlpc_destroy(host.astlpc);
	mctp_destroy(host.mctp);
	free(lpc_mem);
}

static void astlpc_test_negotiate_increased_mtu(void)
{
	struct astlpc_endpoint bmc, host;
	uint8_t kcs[2] = { 0 };
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU,
			   &kcs, lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
			   2 * MCTP_BTU, &kcs, lpc_mem);
	assert(!rc);

	rc = mctp_astlpc_poll(bmc.astlpc);
	assert(rc == 0);

	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc == 0);

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

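/*
 * Re-negotiation check: the host endpoint is torn down and brought back with
 * a larger MTU proposal. Judging by the assertions on layout.rx.size below,
 * the negotiated packet buffer tracks the smaller of the BMC and host
 * proposals in each round of negotiation.
 */
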
static void astlpc_test_negotiate_mtu_low_high(void)
{
	struct astlpc_endpoint bmc, host;
	uint8_t kcs[2] = { 0 };
	uint32_t bmtu, hmtu;
	void *lpc_mem;
	int rc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	bmtu = 3 * MCTP_BTU;
	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, bmtu, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Host initialisation with low MTU */
	hmtu = 2 * MCTP_BTU;
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Process low MTU proposal */
	rc = mctp_astlpc_poll(bmc.astlpc);
	assert(rc == 0);

	/* Accept low MTU proposal */
	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc == 0);

	assert(host.astlpc->layout.rx.size ==
	       astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(hmtu)));

	/* Tear-down the host so we can bring up a new one */
	endpoint_destroy(&host);

	/*
	 * Bring up a new host endpoint with a higher MTU than we previously
	 * negotiated
	 */
	hmtu = 3 * MCTP_BTU;
	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Process high MTU proposal */
	rc = mctp_astlpc_poll(bmc.astlpc);
	assert(rc == 0);

	/* Accept high MTU proposal */
	rc = mctp_astlpc_poll(host.astlpc);
	assert(rc == 0);

	assert(host.astlpc->layout.rx.size ==
	       astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(bmtu)));

	endpoint_destroy(&host);
	endpoint_destroy(&bmc);
	free(lpc_mem);
}

static void astlpc_test_send_large_packet(void)
{
	struct astlpc_endpoint *bmc, *host;
	struct astlpc_test ctx;
	uint8_t kcs[2] = { 0 };
	uint8_t tag = 0;
	void *lpc_mem;
	int rc;

	host = &ctx.host;
	bmc = &ctx.bmc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 8192, &kcs,
			   lpc_mem);
	assert(!rc);

	/* Host initialisation */
	rc = endpoint_init(host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 8192, &kcs,
			   lpc_mem);
	assert(!rc);

	ctx.count = 0;
	mctp_set_rx_all(bmc->mctp, rx_message, &ctx);

	rc = mctp_astlpc_poll(bmc->astlpc);
	assert(rc == 0);

	rc = mctp_astlpc_poll(host->astlpc);
	assert(rc == 0);

	ctx.msg = malloc(2 * MCTP_BODY_SIZE(8192));
	assert(ctx.msg);

	memset(ctx.msg, 0x5a, 2 * MCTP_BODY_SIZE(8192));

	rc = mctp_message_tx(host->mctp, 8, MCTP_MESSAGE_TO_DST, tag, ctx.msg,
			     2 * MCTP_BODY_SIZE(8192));
	assert(rc == 0);
	rc = mctp_astlpc_poll(bmc->astlpc);
	assert(rc == 0);
	rc = mctp_astlpc_poll(host->astlpc);
	assert(rc == 0);
	rc = mctp_astlpc_poll(bmc->astlpc);
	assert(rc == 0);
	rc = mctp_astlpc_poll(host->astlpc);
	assert(rc == 0);

	assert(ctx.count == 1);

	free(ctx.msg);
	endpoint_destroy(host);
	endpoint_destroy(bmc);
	free(lpc_mem);
}

static void astlpc_test_tx_before_channel_init(void)
{
	struct astlpc_endpoint *bmc;
	struct astlpc_test ctx;
	uint8_t kcs[2] = { 0 };
	uint8_t msg[MCTP_BTU];
	uint8_t tag = 0;
	void *lpc_mem;
	int rc;

	bmc = &ctx.bmc;

	/* Test harness initialisation */
	lpc_mem = calloc(1, 1 * 1024 * 1024);
	assert(lpc_mem);

	/* BMC initialisation */
	rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 0, &kcs,
			   lpc_mem);
	assert(!rc);

	memset(msg, '\0', sizeof(msg));

	/*
	 * There was once a bug where the calculated MTU was 0 and the
	 * packetisation loop in mctp_message_tx_on_bus() allocated all the
	 * memory. Catch the bug and avoid OOMing the test machine by
	 * terminating after a period long enough to packetise the message.
	 */
	alarm(1);
	mctp_message_tx(bmc->mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
			sizeof(msg));
	alarm(0);

	endpoint_destroy(bmc);
	free(lpc_mem);
}

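/*
 * The two tests below flip bits in the CRC-32 trailer of a packet sitting in
 * the shared window and check that the receiver drops it (the rx callback
 * never fires) while still returning buffer ownership to the sender.
 */
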
static void astlpc_test_corrupt_host_tx(void)
{
	struct astlpc_test ctx = { 0 };
	struct mctp_lpcmap_hdr *hdr;
	uint8_t msg[MCTP_BTU];
	uint32_t offset;
	uint8_t tag = 0;
	uint32_t code;
	uint8_t *tlr;
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0xa5, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

	/* Host sends the single-packet message */
	rc = mctp_message_tx(ctx.host.mctp, 8, MCTP_MESSAGE_TO_DST, tag, msg,
			     sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU);

	/* Corrupt the CRC-32 in the message trailer */
	hdr = (struct mctp_lpcmap_hdr *)ctx.lpc_mem;
	offset = be32toh(hdr->layout.tx_offset);
	tlr = (uint8_t *)&ctx.lpc_mem[offset] + 4 + sizeof(msg);
	memcpy(&code, tlr, sizeof(code));
	code = ~code;
	memcpy(tlr, &code, sizeof(code));

	/* BMC receives the single-packet message */
	mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(ctx.count == 0);

	/* BMC returns Tx area ownership to Host */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);

	/* Host dequeues ownership hand-over */
	rc = mctp_astlpc_poll(ctx.host.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

static void astlpc_test_corrupt_bmc_tx(void)
{
	struct astlpc_test ctx = { 0 };
	struct mctp_lpcmap_hdr *hdr;
	uint8_t msg[MCTP_BTU];
	uint32_t offset;
	uint8_t tag = 0;
	uint32_t code;
	uint8_t *tlr;
	int rc;

	/* Test harness initialisation */

	network_init(&ctx);

	memset(&msg[0], 0x5a, MCTP_BTU);

	ctx.msg = &msg[0];
	ctx.count = 0;
	mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

	/* BMC sends the single-packet message */
	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
			     sizeof(msg));
	assert(rc == 0);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

	/* Check that the BMC sent a fully-formed packet */
	astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU);

	/* Corrupt the CRC-32 in the message trailer */
	hdr = (struct mctp_lpcmap_hdr *)ctx.lpc_mem;
	offset = be32toh(hdr->layout.rx_offset);
	tlr = (uint8_t *)&ctx.lpc_mem[offset] + 4 + sizeof(msg);
	memcpy(&code, tlr, sizeof(code));
	code = ~code;
	memcpy(tlr, &code, sizeof(code));

	/* Host drops the single-packet message */
	mctp_astlpc_poll(ctx.host.astlpc);
	assert(ctx.count == 0);

	/* Host returns Rx area ownership to BMC */
	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);

	/* BMC dequeues ownership hand-over */
	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
	assert(rc == 0);

	network_destroy(&ctx);
}

/* clang-format off */
#define TEST_CASE(test) { #test, test }
static const struct {
	const char *name;
	void (*test)(void);
} astlpc_tests[] = {
	TEST_CASE(astlpc_test_simple_init),
	TEST_CASE(astlpc_test_bad_version),
	TEST_CASE(astlpc_test_incompatible_versions),
	TEST_CASE(astlpc_test_choose_bmc_ver_cur),
	TEST_CASE(astlpc_test_choose_host_ver_cur),
	TEST_CASE(astlpc_test_version_host_fails_negotiation),
	TEST_CASE(astlpc_test_version_bmc_fails_negotiation),
	TEST_CASE(astlpc_test_host_before_bmc),
	TEST_CASE(astlpc_test_simple_message_bmc_to_host),
	TEST_CASE(astlpc_test_simple_message_host_to_bmc),
	TEST_CASE(astlpc_test_packetised_message_bmc_to_host),
	TEST_CASE(astlpc_test_simple_indirect_message_bmc_to_host),
	TEST_CASE(astlpc_test_host_tx_bmc_gone),
	TEST_CASE(astlpc_test_poll_not_ready),
	TEST_CASE(astlpc_test_undefined_command),
	TEST_CASE(astlpc_test_buffers_rx_offset_overflow),
	TEST_CASE(astlpc_test_buffers_tx_offset_overflow),
	TEST_CASE(astlpc_test_buffers_rx_size_overflow),
	TEST_CASE(astlpc_test_buffers_tx_size_overflow),
	TEST_CASE(astlpc_test_buffers_rx_window_violation),
	TEST_CASE(astlpc_test_buffers_tx_window_violation),
	TEST_CASE(astlpc_test_buffers_rx_size_fails_btu),
	TEST_CASE(astlpc_test_buffers_tx_size_fails_btu),
	TEST_CASE(astlpc_test_buffers_overlap_rx_low),
	TEST_CASE(astlpc_test_buffers_overlap_tx_low),
	TEST_CASE(astlpc_test_buffers_bad_host_proposal),
	TEST_CASE(astlpc_test_buffers_bad_bmc_proposal),
	TEST_CASE(astlpc_test_buffers_bad_bmc_negotiation),
	TEST_CASE(astlpc_test_buffers_overlap_exact),
	TEST_CASE(astlpc_test_buffers_overlap_control),
	TEST_CASE(astlpc_test_buffers_bad_host_init),
	TEST_CASE(astlpc_test_negotiate_increased_mtu),
	TEST_CASE(astlpc_test_negotiate_mtu_low_high),
	TEST_CASE(astlpc_test_send_large_packet),
	TEST_CASE(astlpc_test_tx_before_channel_init),
	TEST_CASE(astlpc_test_corrupt_host_tx),
	TEST_CASE(astlpc_test_corrupt_bmc_tx),
};
/* clang-format on */

#ifndef BUILD_ASSERT
#define BUILD_ASSERT(x) \
	do { \
		(void)sizeof(char[0 - (!(x))]); \
	} while (0)
#endif

int main(void)
{
	size_t i;

	mctp_set_log_stdio(MCTP_LOG_DEBUG);

	BUILD_ASSERT(ARRAY_SIZE(astlpc_tests) < SIZE_MAX);
	for (i = 0; i < ARRAY_SIZE(astlpc_tests); i++) {
		mctp_prlog(MCTP_LOG_DEBUG, "begin: %s", astlpc_tests[i].name);
		astlpc_tests[i].test();
		mctp_prlog(MCTP_LOG_DEBUG, "end: %s\n", astlpc_tests[i].name);
	}

	return 0;
}