/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#define ASTLPC_VER_CUR 2
#include "astlpc.c"

#ifdef pr_fmt
#undef pr_fmt
#define pr_fmt(x) "test: " x
#endif

#include "libmctp-astlpc.h"
#include "libmctp-log.h"
#include "container_of.h"

#ifdef NDEBUG
#undef NDEBUG
#endif

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/random.h>
#include <unistd.h>

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif

struct mctp_binding_astlpc_mmio {
        struct mctp_binding_astlpc astlpc;
        bool bmc;

        uint8_t (*kcs)[2];

        size_t lpc_size;
        uint8_t *lpc;
};

struct astlpc_endpoint {
        struct mctp_binding_astlpc_mmio mmio;
        struct mctp_binding_astlpc *astlpc;
        struct mctp *mctp;
};

struct astlpc_test {
        struct astlpc_endpoint bmc;
        struct astlpc_endpoint host;
        uint8_t kcs[2];
        uint8_t *lpc_mem;

        void *msg;
        uint8_t count;
};

#define binding_to_mmio(b) \
        container_of(b, struct mctp_binding_astlpc_mmio, astlpc)

static int mctp_astlpc_mmio_kcs_read(void *data,
                                     enum mctp_binding_astlpc_kcs_reg reg,
                                     uint8_t *val)
{
        struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

        *val = (*mmio->kcs)[reg];

        mctp_prdebug("%s: 0x%hhx from %s", __func__, *val,
                     reg ? "status" : "data");

        if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
                uint8_t flag = mmio->bmc ? KCS_STATUS_IBF : KCS_STATUS_OBF;
                (*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] &= ~flag;
        }

        return 0;
}

static int mctp_astlpc_mmio_kcs_write(void *data,
                                      enum mctp_binding_astlpc_kcs_reg reg,
                                      uint8_t val)
{
        struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);
        uint8_t *regp;

        assert(reg == MCTP_ASTLPC_KCS_REG_DATA ||
               reg == MCTP_ASTLPC_KCS_REG_STATUS);

        if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
                uint8_t flag = mmio->bmc ? KCS_STATUS_OBF : KCS_STATUS_IBF;
                (*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] |= flag;
        }

        regp = &(*mmio->kcs)[reg];
        if (reg == MCTP_ASTLPC_KCS_REG_STATUS)
                /*
                 * Status writes may only update the software-defined bits:
                 * the handshake bits covered by the 0x0b mask are dropped
                 * from the written value, and OBF is carried through only
                 * if it is already set.
                 */
                *regp = (val & ~0xbU) | (val & *regp & 1);
        else
                *regp = val;

        mctp_prdebug("%s: 0x%hhx to %s", __func__, val,
                     reg ? "status" : "data");

        return 0;
}

static const struct mctp_binding_astlpc_ops astlpc_direct_mmio_ops = {
        .kcs_read = mctp_astlpc_mmio_kcs_read,
        .kcs_write = mctp_astlpc_mmio_kcs_write,
};

int mctp_astlpc_mmio_lpc_read(void *data, void *buf, long offset, size_t len)
{
        struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

        mctp_prdebug("%s: %zu bytes from 0x%lx", __func__, len, offset);

        assert(offset >= 0L);
        assert(offset + len < mmio->lpc_size);

        memcpy(buf, mmio->lpc + offset, len);

        return 0;
}

int mctp_astlpc_mmio_lpc_write(void *data, const void *buf, long offset,
                               size_t len)
{
        struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);

        mctp_prdebug("%s: %zu bytes to 0x%lx", __func__, len, offset);

        assert(offset >= 0L);
        assert(offset + len < mmio->lpc_size);

        memcpy(mmio->lpc + offset, buf, len);

        return 0;
}

static const struct mctp_binding_astlpc_ops astlpc_indirect_mmio_ops = {
        .kcs_read = mctp_astlpc_mmio_kcs_read,
        .kcs_write = mctp_astlpc_mmio_kcs_write,
        .lpc_read = mctp_astlpc_mmio_lpc_read,
        .lpc_write = mctp_astlpc_mmio_lpc_write,
};

/*
 * A note on the mocks above: the two-byte array injected through mmio.kcs
 * stands in for the KCS data and status registers shared by both endpoints.
 * A data-register read clears IBF when the BMC is the reader and OBF when
 * the host is, while a data-register write sets the opposite flag, loosely
 * mirroring the hardware handshake each side expects to observe. The
 * lpc_read()/lpc_write() callbacks provide the indirect window access used
 * by tests that pass a NULL mapping to mctp_astlpc_init().
 */
#define __unused __attribute__((unused))

static void rx_message(uint8_t eid __unused, void *data, void *msg, size_t len)
{
        struct astlpc_test *test = data;

        mctp_prdebug("MCTP message received: msg: %p, len %zu", msg, len);

        assert(len > 0);
        assert(msg);
        assert(test);
        assert(test->msg);
        assert(!memcmp(test->msg, msg, len));

        test->count++;
}

static int endpoint_init(struct astlpc_endpoint *ep, mctp_eid_t eid,
                         uint8_t mode, uint32_t mtu, uint8_t (*kcs)[2],
                         void *lpc_mem)
{
        /*
         * Configure the direction of the KCS interface so we know whether to
         * set or clear IBF or OBF on writes or reads.
         */
        ep->mmio.bmc = (mode == MCTP_BINDING_ASTLPC_MODE_BMC);

        ep->mctp = mctp_init();
        assert(ep->mctp);

        /* Inject KCS registers */
        ep->mmio.kcs = kcs;

        /* Initialise the binding */
        ep->astlpc = mctp_astlpc_init(mode, mtu, lpc_mem,
                                      &astlpc_direct_mmio_ops, &ep->mmio);
        assert(ep->astlpc);

        return mctp_register_bus(ep->mctp, &ep->astlpc->binding, eid);
}

static void endpoint_destroy(struct astlpc_endpoint *ep)
{
        mctp_astlpc_destroy(ep->astlpc);
        mctp_destroy(ep->mctp);
}

static void network_init(struct astlpc_test *ctx)
{
        int rc;

        ctx->lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(ctx->lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&ctx->bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC,
                           MCTP_BTU, &ctx->kcs, ctx->lpc_mem);
        assert(!rc);
        assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);

        /* Host initialisation */
        rc = endpoint_init(&ctx->host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
                           MCTP_BTU, &ctx->kcs, ctx->lpc_mem);
        assert(!rc);

        /* BMC processes host channel init request, alerts host */
        mctp_astlpc_poll(ctx->bmc.astlpc);
        assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] &
               KCS_STATUS_CHANNEL_ACTIVE);
        assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);

        /* Host dequeues channel init result */
        mctp_astlpc_poll(ctx->host.astlpc);
}

static void network_destroy(struct astlpc_test *ctx)
{
        endpoint_destroy(&ctx->bmc);
        endpoint_destroy(&ctx->host);
        free(ctx->lpc_mem);
}

static void astlpc_assert_tx_packet(struct astlpc_endpoint *src,
                                    const void *expected, size_t len)
{
        /*
         * Skip the binding's 4-byte buffer length field and the 4-byte MCTP
         * packet header to land on the message payload in the Tx area.
         */
        const size_t tx_body = src->astlpc->layout.tx.offset + 4 + 4;
        const void *test = ((char *)src->astlpc->lpc_map) + tx_body;
        assert(!memcmp(test, expected, len));
}
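/*
 * The message-exchange tests below drive the KCS handshake directly and
 * assert on the raw data-register values. As observed in these tests, 0x00
 * is the host's channel-init request, 0xff is the BMC's init-complete
 * response, 0x01 announces that a packet has been written to the sender's
 * Tx area, and 0x02 returns buffer ownership to the sender. These values
 * are taken from the assertions in this file rather than from the protocol
 * specification.
 */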
static void astlpc_test_packetised_message_bmc_to_host(void)
{
        struct astlpc_test ctx = { 0 };
        uint8_t msg[2 * MCTP_BTU];
        int rc;

        /* Test harness initialisation */

        network_init(&ctx);

        memset(&msg[0], 0x5a, MCTP_BTU);
        memset(&msg[MCTP_BTU], 0xa5, MCTP_BTU);

        ctx.msg = &msg[0];
        ctx.count = 0;
        mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

        /* BMC sends a message */
        rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg));
        assert(rc == 0);

        /* Host receives the first packet */
        mctp_astlpc_poll(ctx.host.astlpc);

        /* BMC dequeues ownership hand-over and sends the queued packet */
        rc = mctp_astlpc_poll(ctx.bmc.astlpc);
        assert(rc == 0);

        /* BMC has queued the next packet for the host */
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

        astlpc_assert_tx_packet(&ctx.bmc, &msg[MCTP_BTU], MCTP_BTU);

        /* Host receives the final packet */
        mctp_astlpc_poll(ctx.host.astlpc);
        assert(ctx.count == 1);

        network_destroy(&ctx);
}

static void astlpc_test_simple_message_host_to_bmc(void)
{
        struct astlpc_test ctx = { 0 };
        uint8_t msg[MCTP_BTU];
        int rc;

        /* Test harness initialisation */

        network_init(&ctx);

        memset(&msg[0], 0xa5, MCTP_BTU);

        ctx.msg = &msg[0];
        ctx.count = 0;
        mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

        /* Host sends the single-packet message */
        rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg));
        assert(rc == 0);
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

        astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU);

        /* BMC receives the single-packet message */
        mctp_astlpc_poll(ctx.bmc.astlpc);
        assert(ctx.count == 1);

        /* BMC returns Tx area ownership to Host */
        assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF));
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);

        /* Host dequeues ownership hand-over and sends the queued packet */
        rc = mctp_astlpc_poll(ctx.host.astlpc);
        assert(rc == 0);

        network_destroy(&ctx);
}

static void astlpc_test_simple_message_bmc_to_host(void)
{
        struct astlpc_test ctx = { 0 };
        uint8_t msg[MCTP_BTU];
        int rc;

        /* Test harness initialisation */

        network_init(&ctx);

        memset(&msg[0], 0x5a, MCTP_BTU);

        ctx.msg = &msg[0];
        ctx.count = 0;
        mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

        /* BMC sends the single-packet message */
        rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg));
        assert(rc == 0);
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);

        astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU);

        /* Host receives the single-packet message */
        mctp_astlpc_poll(ctx.host.astlpc);
        assert(ctx.count == 1);

        /* Host returns Rx area ownership to BMC */
        assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
        assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);

        /* BMC dequeues ownership hand-over and sends the queued packet */
        rc = mctp_astlpc_poll(ctx.bmc.astlpc);
        assert(rc == 0);

        network_destroy(&ctx);
}

static void astlpc_test_host_before_bmc(void)
{
        struct mctp_binding_astlpc_mmio mmio = { 0 };
        struct mctp_binding_astlpc *astlpc;
        uint8_t kcs[2] = { 0 };
        struct mctp *mctp;
        int rc;

        mctp = mctp_init();
        assert(mctp);

        /* Inject KCS registers */
        mmio.kcs = &kcs;

        /* Initialise the binding */
        astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
                                  &astlpc_direct_mmio_ops, &mmio);

        /* Register the binding to trigger the start-up sequence */
        rc = mctp_register_bus(mctp, &astlpc->binding, 8);

        /* Start-up should fail as we haven't initialised the BMC */
        assert(rc < 0);

        mctp_astlpc_destroy(astlpc);
        mctp_destroy(mctp);
}
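/*
 * mctp_astlpc_negotiate_version() takes two (min, cur) version pairs,
 * apparently the BMC's followed by the host's going by the test names, and
 * returns the agreed version, or 0 when the ranges are invalid or disjoint.
 * The cases below are expected to settle on the highest version supported
 * by both sides.
 */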
static void astlpc_test_bad_version(void)
{
        assert(0 ==
               mctp_astlpc_negotiate_version(ASTLPC_VER_BAD, ASTLPC_VER_CUR,
                                             ASTLPC_VER_MIN, ASTLPC_VER_CUR));
        assert(0 ==
               mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_BAD,
                                             ASTLPC_VER_MIN, ASTLPC_VER_CUR));
        assert(0 ==
               mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
                                             ASTLPC_VER_BAD, ASTLPC_VER_CUR));
        assert(0 ==
               mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
                                             ASTLPC_VER_MIN, ASTLPC_VER_BAD));
        assert(0 == mctp_astlpc_negotiate_version(
                            ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR, ASTLPC_VER_MIN,
                            ASTLPC_VER_CUR + 1));
        assert(0 == mctp_astlpc_negotiate_version(
                            ASTLPC_VER_MIN, ASTLPC_VER_CUR + 1,
                            ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR));
}

static void astlpc_test_incompatible_versions(void)
{
        assert(0 == mctp_astlpc_negotiate_version(
                            ASTLPC_VER_CUR, ASTLPC_VER_CUR, ASTLPC_VER_CUR + 1,
                            ASTLPC_VER_CUR + 1));
        assert(0 == mctp_astlpc_negotiate_version(
                            ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR + 1,
                            ASTLPC_VER_CUR, ASTLPC_VER_CUR));
}

static void astlpc_test_choose_bmc_ver_cur(void)
{
        assert(2 == mctp_astlpc_negotiate_version(1, 2, 2, 3));
}

static void astlpc_test_choose_host_ver_cur(void)
{
        assert(2 == mctp_astlpc_negotiate_version(2, 3, 1, 2));
}

static void astlpc_test_version_host_fails_negotiation(void)
{
        struct astlpc_endpoint bmc, host;
        struct mctp_lpcmap_hdr *hdr;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Now that the BMC is initialised, break its version announcement */
        hdr = lpc_mem;
        hdr->bmc_ver_cur = ASTLPC_VER_BAD;

        /* Host initialisation */
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(rc < 0);

        endpoint_destroy(&bmc);
        endpoint_destroy(&host);
        free(lpc_mem);
}

static void astlpc_test_version_bmc_fails_negotiation(void)
{
        struct astlpc_endpoint bmc, host;
        struct mctp_lpcmap_hdr *hdr;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Host initialisation */
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Now that the host is initialised, break its version announcement */
        hdr = lpc_mem;
        hdr->host_ver_cur = ASTLPC_VER_BAD;

        /* Poll the BMC to detect the broken host version */
        mctp_astlpc_poll(bmc.astlpc);
        assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));

        /* Poll the host so it detects failed negotiation */
        rc = mctp_astlpc_poll(host.astlpc);
        assert(rc < 0);

        endpoint_destroy(&bmc);
        endpoint_destroy(&host);
        free(lpc_mem);
}

static void astlpc_test_simple_init(void)
{
        struct astlpc_endpoint bmc, host;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Verify the BMC binding was initialised */
        assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);

        /* Host initialisation */
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Host sends channel init command */
        assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
        assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x00);

        /* BMC receives host channel init request */
        mctp_astlpc_poll(bmc.astlpc);

        /* BMC sends init response */
        assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
        assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE);
        assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);

        /* Host dequeues data */
        mctp_astlpc_poll(host.astlpc);

        endpoint_destroy(&bmc);
        endpoint_destroy(&host);
        free(lpc_mem);
}
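/*
 * The indirect test below hands mctp_astlpc_init() a NULL LPC mapping and
 * relies on the lpc_read()/lpc_write() callbacks instead, exercising the
 * binding's indirect window access path rather than the directly-mapped
 * path used by endpoint_init(). The network is therefore wired up by hand.
 */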
static void astlpc_test_simple_indirect_message_bmc_to_host(void)
{
        struct astlpc_test ctx = { 0 };
        uint8_t kcs[2] = { 0 };
        uint8_t msg[MCTP_BTU];
        int rc;

        ctx.lpc_mem = calloc(1, LPC_WIN_SIZE);
        assert(ctx.lpc_mem);

        /* Test message data */
        memset(&msg[0], 0x5a, MCTP_BTU);

        /* Manually set up the network so we can inject the indirect ops */

        /* BMC initialisation */
        ctx.bmc.mmio.bmc = true;
        ctx.bmc.mctp = mctp_init();
        assert(ctx.bmc.mctp);
        ctx.bmc.mmio.kcs = &kcs;
        ctx.bmc.mmio.lpc = ctx.lpc_mem;
        ctx.bmc.mmio.lpc_size = LPC_WIN_SIZE;
        ctx.bmc.astlpc =
                mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, NULL,
                                 &astlpc_indirect_mmio_ops, &ctx.bmc.mmio);
        mctp_register_bus(ctx.bmc.mctp, &ctx.bmc.astlpc->binding, 8);

        /* Host initialisation */
        ctx.host.mmio.bmc = false;
        ctx.host.mctp = mctp_init();
        assert(ctx.host.mctp);
        ctx.host.mmio.kcs = &kcs;
        ctx.host.mmio.lpc = ctx.lpc_mem;
        ctx.host.mmio.lpc_size = LPC_WIN_SIZE;
        ctx.host.astlpc =
                mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
                                 &astlpc_indirect_mmio_ops, &ctx.host.mmio);
        mctp_register_bus(ctx.host.mctp, &ctx.host.astlpc->binding, 9);

        /* BMC processes host channel init request, alerts host */
        mctp_astlpc_poll(ctx.bmc.astlpc);

        /* Host dequeues channel init result */
        mctp_astlpc_poll(ctx.host.astlpc);

        ctx.msg = &msg[0];
        ctx.count = 0;
        mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx);

        /* BMC sends the single-packet message */
        rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg));
        assert(rc == 0);

        /* Host receives the single-packet message */
        rc = mctp_astlpc_poll(ctx.host.astlpc);
        assert(rc == 0);
        assert(ctx.count == 1);

        /* BMC dequeues ownership hand-over and sends the queued packet */
        rc = mctp_astlpc_poll(ctx.bmc.astlpc);
        assert(rc == 0);

        /* Can still tear-down the network in the normal fashion */
        network_destroy(&ctx);
}
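/*
 * The test below tears the BMC endpoint down to clear BMC_READY and expects
 * the host to hold back its transmission until a fresh BMC instance
 * completes channel init, at which point the queued message is flushed
 * through. This summarises the flow asserted step-by-step in the test body.
 */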
static void astlpc_test_host_tx_bmc_gone(void)
{
        struct astlpc_test ctx = { 0 };
        uint8_t unwritten[MCTP_BTU];
        uint8_t msg[MCTP_BTU];
        int rc;

        /* Test harness initialisation */

        network_init(&ctx);

        memset(&msg[0], 0x5a, sizeof(msg));
        memset(&unwritten[0], 0, sizeof(unwritten));

        ctx.msg = &msg[0];
        ctx.count = 0;

        /* Clear bmc-ready */
        endpoint_destroy(&ctx.bmc);

        /* Host detects that the BMC is disabled */
        mctp_astlpc_poll(ctx.host.astlpc);

        /* Host attempts to send the single-packet message, but is prevented */
        rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg));
        assert(rc == 0);
        assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
        astlpc_assert_tx_packet(&ctx.host, &unwritten[0], MCTP_BTU);

        /* BMC comes back */
        rc = endpoint_init(&ctx.bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &ctx.kcs, ctx.lpc_mem);
        assert(!rc);
        mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx);

        /* Host triggers channel init */
        mctp_astlpc_poll(ctx.host.astlpc);

        /* BMC handles channel init */
        mctp_astlpc_poll(ctx.bmc.astlpc);

        /* Host completes channel init, flushing the Tx queue */
        mctp_astlpc_poll(ctx.host.astlpc);

        /* BMC receives the single-packet message */
        mctp_astlpc_poll(ctx.bmc.astlpc);
        assert(ctx.count == 1);

        network_destroy(&ctx);
}

static void astlpc_test_poll_not_ready(void)
{
        struct astlpc_endpoint bmc;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Check for a command despite none present */
        rc = mctp_astlpc_poll(bmc.astlpc);

        /* Make sure it doesn't fail */
        assert(rc == 0);

        endpoint_destroy(&bmc);
        free(lpc_mem);
}

static void astlpc_test_undefined_command(void)
{
        struct astlpc_endpoint bmc;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* 0x5a isn't legal in v1 or v2 */
        kcs[MCTP_ASTLPC_KCS_REG_DATA] = 0x5a;
        kcs[MCTP_ASTLPC_KCS_REG_STATUS] |= KCS_STATUS_IBF;

        /* Poll the BMC to process the undefined command */
        rc = mctp_astlpc_poll(bmc.astlpc);

        /* Make sure it doesn't fail; the bad command should be discarded */
        assert(rc == 0);

        endpoint_destroy(&bmc);
        free(lpc_mem);
}

#define BUFFER_MIN (MCTP_PACKET_SIZE(MCTP_BTU) + 4)

static const struct mctp_binding_astlpc astlpc_layout_ctx = {
        .proto = &astlpc_protocol_version[2],
};
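/*
 * The layout tests below feed hand-built { offset, size } pairs to
 * mctp_astlpc_layout_validate() against the version 2 protocol entry
 * selected in astlpc_layout_ctx. BUFFER_MIN is a BTU-sized packet plus four
 * bytes, presumably the buffer's length field, while control_size comes from
 * astlpc.c and covers the control area at the base of the LPC window.
 * Offsets that overflow, buffers that escape the window, undersized buffers
 * and overlapping regions are all expected to be rejected.
 */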
static void astlpc_test_buffers_rx_offset_overflow(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { UINT32_MAX, BUFFER_MIN },
                .tx = { control_size, BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_offset_overflow(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size, BUFFER_MIN },
                .tx = { UINT32_MAX, BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_size_overflow(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size + BUFFER_MIN, UINT32_MAX },
                .tx = { control_size, BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_size_overflow(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size, BUFFER_MIN },
                .tx = { control_size + BUFFER_MIN, UINT32_MAX },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_window_violation(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
                .tx = { control_size, BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_window_violation(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size, BUFFER_MIN },
                .tx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_rx_size_fails_btu(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size, BUFFER_MIN - 1 },
                .tx = { control_size + BUFFER_MIN, BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_tx_size_fails_btu(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size, BUFFER_MIN },
                .tx = { control_size + BUFFER_MIN, BUFFER_MIN - 1 },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_rx_low(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size, 2 * BUFFER_MIN },
                .tx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_tx_low(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
                .tx = { control_size, 2 * BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_exact(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { control_size, 2 * BUFFER_MIN },
                .tx = { control_size, 2 * BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_overlap_control(void)
{
        struct mctp_astlpc_layout l = {
                .rx = { 0, BUFFER_MIN },
                .tx = { control_size + BUFFER_MIN, BUFFER_MIN },
        };

        assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
}

static void astlpc_test_buffers_bad_host_proposal(void)
{
        struct astlpc_endpoint bmc, host;
        struct mctp_lpcmap_hdr *hdr;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Host initialisation */
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /*
         * Now that the host has initialised the control area, break
         * something before polling the BMC
         */
        hdr = lpc_mem;
        hdr->layout.rx_size = 0;

        mctp_astlpc_poll(bmc.astlpc);

        /* Make sure the BMC has not set the channel to active */
        assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));

        endpoint_destroy(&host);
        endpoint_destroy(&bmc);
        free(lpc_mem);
}

static void astlpc_test_buffers_bad_bmc_proposal(void)
{
        struct astlpc_endpoint bmc, host;
        struct mctp_lpcmap_hdr *hdr;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /*
         * Now that the BMC has initialised the control area, break something
         * before initialising the host
         */
        hdr = lpc_mem;
        hdr->layout.rx_size = 0;

        /* Host initialisation: Fails due to bad layout */
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(rc < 0);

        endpoint_destroy(&host);
        endpoint_destroy(&bmc);
        free(lpc_mem);
}
static void astlpc_test_buffers_bad_bmc_negotiation(void)
{
        struct astlpc_endpoint bmc, host;
        struct mctp_lpcmap_hdr *hdr;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Host initialisation */
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        mctp_astlpc_poll(bmc.astlpc);

        /*
         * Now that the BMC has initialised the control area, break something
         * before polling the host
         */
        hdr = lpc_mem;
        hdr->layout.rx_size = 0;

        rc = mctp_astlpc_poll(host.astlpc);
        assert(rc < 0);

        endpoint_destroy(&host);
        endpoint_destroy(&bmc);
        free(lpc_mem);
}

static void astlpc_test_buffers_bad_host_init(void)
{
        struct astlpc_endpoint host;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        host.mctp = mctp_init();
        assert(host.mctp);
        host.mmio.kcs = &kcs;
        host.mmio.bmc = false;

        /* Set the MTU to 0 to provoke a failure */
        host.astlpc =
                mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, 0, lpc_mem,
                                 &astlpc_direct_mmio_ops, &host.mmio);

        rc = mctp_register_bus(host.mctp, &host.astlpc->binding, 8);
        assert(rc < 0);

        mctp_astlpc_destroy(host.astlpc);
        mctp_destroy(host.mctp);
        free(lpc_mem);
}
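/*
 * The two tests below exercise MTU negotiation: each endpoint proposes an
 * MTU through mctp_astlpc_init(), and the layout that results from polling
 * both sides reflects the outcome. Judging by the assertions against
 * packet_size(MCTP_PACKET_SIZE(...)), the negotiated value appears to be the
 * smaller of the two proposals; re-registering the host with a larger MTU
 * renegotiates the layout accordingly.
 */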
static void astlpc_test_negotiate_increased_mtu(void)
{
        struct astlpc_endpoint bmc, host;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU,
                           &kcs, lpc_mem);
        assert(!rc);

        /* Host initialisation */
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
                           2 * MCTP_BTU, &kcs, lpc_mem);
        assert(!rc);

        rc = mctp_astlpc_poll(bmc.astlpc);
        assert(rc == 0);

        rc = mctp_astlpc_poll(host.astlpc);
        assert(rc == 0);

        endpoint_destroy(&host);
        endpoint_destroy(&bmc);
        free(lpc_mem);
}

static void astlpc_test_negotiate_mtu_low_high(void)
{
        struct astlpc_endpoint bmc, host;
        uint8_t kcs[2] = { 0 };
        uint32_t bmtu, hmtu;
        void *lpc_mem;
        int rc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        bmtu = 3 * MCTP_BTU;
        rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, bmtu, &kcs,
                           lpc_mem);
        assert(!rc);

        /* Host initialisation with low MTU */
        hmtu = 2 * MCTP_BTU;
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
                           lpc_mem);
        assert(!rc);

        /* Process low MTU proposal */
        rc = mctp_astlpc_poll(bmc.astlpc);
        assert(rc == 0);

        /* Accept low MTU proposal */
        rc = mctp_astlpc_poll(host.astlpc);
        assert(rc == 0);

        assert(host.astlpc->layout.rx.size ==
               astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(hmtu)));

        /* Tear-down the host so we can bring up a new one */
        endpoint_destroy(&host);

        /*
         * Bring up a new host endpoint with a higher MTU than we previously
         * negotiated
         */
        hmtu = 3 * MCTP_BTU;
        rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
                           lpc_mem);
        assert(!rc);

        /* Process high MTU proposal */
        rc = mctp_astlpc_poll(bmc.astlpc);
        assert(rc == 0);

        /* Accept high MTU proposal */
        rc = mctp_astlpc_poll(host.astlpc);
        assert(rc == 0);

        assert(host.astlpc->layout.rx.size ==
               astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(bmtu)));

        endpoint_destroy(&host);
        endpoint_destroy(&bmc);
        free(lpc_mem);
}

static void astlpc_test_send_large_packet(void)
{
        struct astlpc_endpoint *bmc, *host;
        struct astlpc_test ctx;
        uint8_t kcs[2] = { 0 };
        void *lpc_mem;
        int rc;

        host = &ctx.host;
        bmc = &ctx.bmc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 8192, &kcs,
                           lpc_mem);
        assert(!rc);

        /* Host initialisation */
        rc = endpoint_init(host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 8192, &kcs,
                           lpc_mem);
        assert(!rc);

        ctx.count = 0;
        mctp_set_rx_all(bmc->mctp, rx_message, &ctx);

        rc = mctp_astlpc_poll(bmc->astlpc);
        assert(rc == 0);

        rc = mctp_astlpc_poll(host->astlpc);
        assert(rc == 0);

        ctx.msg = malloc(2 * MCTP_BODY_SIZE(8192));
        assert(ctx.msg);

        memset(ctx.msg, 0x5a, 2 * MCTP_BODY_SIZE(8192));

        rc = mctp_message_tx(host->mctp, 8, ctx.msg, 2 * MCTP_BODY_SIZE(8192));
        assert(rc == 0);
        rc = mctp_astlpc_poll(bmc->astlpc);
        assert(rc == 0);
        rc = mctp_astlpc_poll(host->astlpc);
        assert(rc == 0);
        rc = mctp_astlpc_poll(bmc->astlpc);
        assert(rc == 0);
        rc = mctp_astlpc_poll(host->astlpc);
        assert(rc == 0);

        assert(ctx.count == 1);

        free(ctx.msg);
        endpoint_destroy(host);
        endpoint_destroy(bmc);
        free(lpc_mem);
}

static void astlpc_test_tx_before_channel_init(void)
{
        struct astlpc_endpoint *bmc;
        struct astlpc_test ctx;
        uint8_t kcs[2] = { 0 };
        uint8_t msg[MCTP_BTU];
        void *lpc_mem;
        int rc;

        bmc = &ctx.bmc;

        /* Test harness initialisation */
        lpc_mem = calloc(1, 1 * 1024 * 1024);
        assert(lpc_mem);

        /* BMC initialisation */
        rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 0, &kcs,
                           lpc_mem);
        assert(!rc);

        memset(msg, '\0', sizeof(msg));

        /*
         * There was once a bug where the calculated MTU was 0 and the
         * packetisation loop in mctp_message_tx_on_bus() allocated all the
         * memory. Catch the bug and avoid OOMing the test machine by
         * terminating after a period long enough to packetise the message.
         */
        alarm(1);
        mctp_message_tx(bmc->mctp, 9, msg, sizeof(msg));
        alarm(0);

        endpoint_destroy(bmc);
        free(lpc_mem);
}

/* clang-format off */
#define TEST_CASE(test) { #test, test }
static const struct {
        const char *name;
        void (*test)(void);
} astlpc_tests[] = {
        TEST_CASE(astlpc_test_simple_init),
        TEST_CASE(astlpc_test_bad_version),
        TEST_CASE(astlpc_test_incompatible_versions),
        TEST_CASE(astlpc_test_choose_bmc_ver_cur),
        TEST_CASE(astlpc_test_choose_host_ver_cur),
        TEST_CASE(astlpc_test_version_host_fails_negotiation),
        TEST_CASE(astlpc_test_version_bmc_fails_negotiation),
        TEST_CASE(astlpc_test_host_before_bmc),
        TEST_CASE(astlpc_test_simple_message_bmc_to_host),
        TEST_CASE(astlpc_test_simple_message_host_to_bmc),
        TEST_CASE(astlpc_test_packetised_message_bmc_to_host),
        TEST_CASE(astlpc_test_simple_indirect_message_bmc_to_host),
        TEST_CASE(astlpc_test_host_tx_bmc_gone),
        TEST_CASE(astlpc_test_poll_not_ready),
        TEST_CASE(astlpc_test_undefined_command),
        TEST_CASE(astlpc_test_buffers_rx_offset_overflow),
        TEST_CASE(astlpc_test_buffers_tx_offset_overflow),
        TEST_CASE(astlpc_test_buffers_rx_size_overflow),
        TEST_CASE(astlpc_test_buffers_tx_size_overflow),
        TEST_CASE(astlpc_test_buffers_rx_window_violation),
        TEST_CASE(astlpc_test_buffers_tx_window_violation),
        TEST_CASE(astlpc_test_buffers_rx_size_fails_btu),
        TEST_CASE(astlpc_test_buffers_tx_size_fails_btu),
        TEST_CASE(astlpc_test_buffers_overlap_rx_low),
        TEST_CASE(astlpc_test_buffers_overlap_tx_low),
        TEST_CASE(astlpc_test_buffers_bad_host_proposal),
        TEST_CASE(astlpc_test_buffers_bad_bmc_proposal),
        TEST_CASE(astlpc_test_buffers_bad_bmc_negotiation),
        TEST_CASE(astlpc_test_buffers_overlap_exact),
        TEST_CASE(astlpc_test_buffers_overlap_control),
        TEST_CASE(astlpc_test_buffers_bad_host_init),
        TEST_CASE(astlpc_test_negotiate_increased_mtu),
        TEST_CASE(astlpc_test_negotiate_mtu_low_high),
        TEST_CASE(astlpc_test_send_large_packet),
        TEST_CASE(astlpc_test_tx_before_channel_init),
};
/* clang-format on */

#ifndef BUILD_ASSERT
#define BUILD_ASSERT(x) \
        do { \
                (void)sizeof(char[0 - (!(x))]); \
        } while (0)
#endif

int main(void)
{
        size_t i;

        mctp_set_log_stdio(MCTP_LOG_DEBUG);

        BUILD_ASSERT(ARRAY_SIZE(astlpc_tests) < SIZE_MAX);
        for (i = 0; i < ARRAY_SIZE(astlpc_tests); i++) {
                mctp_prlog(MCTP_LOG_DEBUG, "begin: %s", astlpc_tests[i].name);
                astlpc_tests[i].test();
                mctp_prlog(MCTP_LOG_DEBUG, "end: %s\n", astlpc_tests[i].name);
        }

        return 0;
}