1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */ 2 3 #ifdef HAVE_CONFIG_H 4 #include "config.h" 5 #endif 6 7 #define ASTLPC_VER_CUR 2 8 #include "astlpc.c" 9 10 #ifdef pr_fmt 11 #undef pr_fmt 12 #define pr_fmt(x) "test: " x 13 #endif 14 15 #include "libmctp-astlpc.h" 16 #include "libmctp-log.h" 17 #include "container_of.h" 18 19 #ifdef NDEBUG 20 #undef NDEBUG 21 #endif 22 23 #include <assert.h> 24 #include <limits.h> 25 #include <stdint.h> 26 #include <stdio.h> 27 #include <stdlib.h> 28 #include <string.h> 29 #include <sys/random.h> 30 #include <unistd.h> 31 32 #ifndef ARRAY_SIZE 33 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) 34 #endif 35 36 struct mctp_binding_astlpc_mmio { 37 struct mctp_binding_astlpc astlpc; 38 bool bmc; 39 40 uint8_t (*kcs)[2]; 41 42 size_t lpc_size; 43 uint8_t *lpc; 44 }; 45 46 struct astlpc_endpoint { 47 struct mctp_binding_astlpc_mmio mmio; 48 struct mctp_binding_astlpc *astlpc; 49 struct mctp *mctp; 50 }; 51 52 struct astlpc_test { 53 struct astlpc_endpoint bmc; 54 struct astlpc_endpoint host; 55 uint8_t kcs[2]; 56 uint8_t *lpc_mem; 57 58 void *msg; 59 uint8_t count; 60 }; 61 62 #define binding_to_mmio(b) \ 63 container_of(b, struct mctp_binding_astlpc_mmio, astlpc) 64 65 static int mctp_astlpc_mmio_kcs_read(void *data, 66 enum mctp_binding_astlpc_kcs_reg reg, 67 uint8_t *val) 68 { 69 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 70 71 *val = (*mmio->kcs)[reg]; 72 73 mctp_prdebug("%s: 0x%hhx from %s", __func__, *val, 74 reg ? "status" : "data"); 75 76 if (reg == MCTP_ASTLPC_KCS_REG_DATA) { 77 uint8_t flag = mmio->bmc ? KCS_STATUS_IBF : KCS_STATUS_OBF; 78 (*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] &= ~flag; 79 } 80 81 return 0; 82 } 83 84 static int mctp_astlpc_mmio_kcs_write(void *data, 85 enum mctp_binding_astlpc_kcs_reg reg, 86 uint8_t val) 87 { 88 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 89 uint8_t *regp; 90 91 assert(reg == MCTP_ASTLPC_KCS_REG_DATA || 92 reg == MCTP_ASTLPC_KCS_REG_STATUS); 93 94 if (reg == MCTP_ASTLPC_KCS_REG_DATA) { 95 uint8_t flag = mmio->bmc ? KCS_STATUS_OBF : KCS_STATUS_IBF; 96 (*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] |= flag; 97 } 98 99 regp = &(*mmio->kcs)[reg]; 100 if (reg == MCTP_ASTLPC_KCS_REG_STATUS) 101 *regp = (val & ~0xbU) | (val & *regp & 1); 102 else 103 *regp = val; 104 105 mctp_prdebug("%s: 0x%hhx to %s", __func__, val, 106 reg ? "status" : "data"); 107 108 return 0; 109 } 110 111 static const struct mctp_binding_astlpc_ops astlpc_direct_mmio_ops = { 112 .kcs_read = mctp_astlpc_mmio_kcs_read, 113 .kcs_write = mctp_astlpc_mmio_kcs_write, 114 }; 115 116 int mctp_astlpc_mmio_lpc_read(void *data, void *buf, long offset, size_t len) 117 { 118 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 119 120 mctp_prdebug("%s: %zu bytes from 0x%lx", __func__, len, offset); 121 122 assert(offset >= 0L); 123 assert(offset + len < mmio->lpc_size); 124 125 memcpy(buf, mmio->lpc + offset, len); 126 127 return 0; 128 } 129 130 int mctp_astlpc_mmio_lpc_write(void *data, const void *buf, long offset, 131 size_t len) 132 { 133 struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data); 134 135 mctp_prdebug("%s: %zu bytes to 0x%lx", __func__, len, offset); 136 137 assert(offset >= 0L); 138 assert(offset + len < mmio->lpc_size); 139 140 memcpy(mmio->lpc + offset, buf, len); 141 142 return 0; 143 } 144 145 static const struct mctp_binding_astlpc_ops astlpc_indirect_mmio_ops = { 146 .kcs_read = mctp_astlpc_mmio_kcs_read, 147 .kcs_write = mctp_astlpc_mmio_kcs_write, 148 .lpc_read = mctp_astlpc_mmio_lpc_read, 149 .lpc_write = mctp_astlpc_mmio_lpc_write, 150 }; 151 152 #define __unused __attribute__((unused)) 153 154 static void rx_message(uint8_t eid __unused, void *data __unused, void *msg, 155 size_t len) 156 { 157 struct astlpc_test *test = data; 158 159 mctp_prdebug("MCTP message received: msg: %p, len %zd", msg, len); 160 161 assert(len > 0); 162 assert(msg); 163 assert(test); 164 assert(test->msg); 165 assert(!memcmp(test->msg, msg, len)); 166 167 test->count++; 168 } 169 170 static int endpoint_init(struct astlpc_endpoint *ep, mctp_eid_t eid, 171 uint8_t mode, uint32_t mtu, uint8_t (*kcs)[2], 172 void *lpc_mem) 173 { 174 /* 175 * Configure the direction of the KCS interface so we know whether to 176 * set or clear IBF or OBF on writes or reads. 177 */ 178 ep->mmio.bmc = (mode == MCTP_BINDING_ASTLPC_MODE_BMC); 179 180 ep->mctp = mctp_init(); 181 assert(ep->mctp); 182 183 /* Inject KCS registers */ 184 ep->mmio.kcs = kcs; 185 186 /* Initialise the binding */ 187 ep->astlpc = mctp_astlpc_init(mode, mtu, lpc_mem, 188 &astlpc_direct_mmio_ops, &ep->mmio); 189 assert(ep->astlpc); 190 191 return mctp_register_bus(ep->mctp, &ep->astlpc->binding, eid); 192 } 193 194 static void endpoint_destroy(struct astlpc_endpoint *ep) 195 { 196 mctp_astlpc_destroy(ep->astlpc); 197 mctp_destroy(ep->mctp); 198 } 199 200 static void network_init(struct astlpc_test *ctx) 201 { 202 int rc; 203 204 ctx->lpc_mem = calloc(1, 1 * 1024 * 1024); 205 assert(ctx->lpc_mem); 206 207 /* BMC initialisation */ 208 rc = endpoint_init(&ctx->bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 209 &ctx->kcs, ctx->lpc_mem); 210 assert(!rc); 211 assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY); 212 213 /* Host initialisation */ 214 rc = endpoint_init(&ctx->host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 215 MCTP_BTU, &ctx->kcs, ctx->lpc_mem); 216 assert(!rc); 217 218 /* BMC processes host channel init request, alerts host */ 219 mctp_astlpc_poll(ctx->bmc.astlpc); 220 assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & 221 KCS_STATUS_CHANNEL_ACTIVE); 222 assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff); 223 224 /* Host dequeues channel init result */ 225 mctp_astlpc_poll(ctx->host.astlpc); 226 } 227 228 static void network_destroy(struct astlpc_test *ctx) 229 { 230 endpoint_destroy(&ctx->bmc); 231 endpoint_destroy(&ctx->host); 232 free(ctx->lpc_mem); 233 } 234 235 static void astlpc_assert_tx_packet(struct astlpc_endpoint *src, 236 const void *expected, size_t len) 237 { 238 const size_t tx_body = src->astlpc->layout.tx.offset + 4 + 4; 239 const void *test = ((char *)src->astlpc->lpc_map) + tx_body; 240 assert(!memcmp(test, expected, len)); 241 } 242 243 static void astlpc_test_packetised_message_bmc_to_host(void) 244 { 245 struct astlpc_test ctx = { 0 }; 246 uint8_t msg[2 * MCTP_BTU]; 247 int rc; 248 249 /* Test harness initialisation */ 250 251 network_init(&ctx); 252 253 memset(&msg[0], 0x5a, MCTP_BTU); 254 memset(&msg[MCTP_BTU], 0xa5, MCTP_BTU); 255 256 ctx.msg = &msg[0]; 257 ctx.count = 0; 258 mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx); 259 260 /* BMC sends a message */ 261 rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg)); 262 assert(rc == 0); 263 264 /* Host receives the first packet */ 265 mctp_astlpc_poll(ctx.host.astlpc); 266 267 /* BMC dequeues ownership hand-over and sends the queued packet */ 268 rc = mctp_astlpc_poll(ctx.bmc.astlpc); 269 assert(rc == 0); 270 271 /* Host receives the next packet */ 272 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 273 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01); 274 275 astlpc_assert_tx_packet(&ctx.bmc, &msg[MCTP_BTU], MCTP_BTU); 276 277 /* Host receives final packet */ 278 mctp_astlpc_poll(ctx.host.astlpc); 279 assert(ctx.count == 1); 280 281 network_destroy(&ctx); 282 } 283 284 static void astlpc_test_simple_message_host_to_bmc(void) 285 { 286 struct astlpc_test ctx = { 0 }; 287 uint8_t msg[MCTP_BTU]; 288 int rc; 289 290 /* Test harness initialisation */ 291 292 network_init(&ctx); 293 294 memset(&msg[0], 0xa5, MCTP_BTU); 295 296 ctx.msg = &msg[0]; 297 ctx.count = 0; 298 mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx); 299 300 /* Host sends the single-packet message */ 301 rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg)); 302 assert(rc == 0); 303 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF); 304 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01); 305 306 astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU); 307 308 /* BMC receives the single-packet message */ 309 mctp_astlpc_poll(ctx.bmc.astlpc); 310 assert(ctx.count == 1); 311 312 /* BMC returns Tx area ownership to Host */ 313 assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF)); 314 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02); 315 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 316 317 /* Host dequeues ownership hand-over and sends the queued packet */ 318 rc = mctp_astlpc_poll(ctx.host.astlpc); 319 assert(rc == 0); 320 321 network_destroy(&ctx); 322 } 323 324 static void astlpc_test_simple_message_bmc_to_host(void) 325 { 326 struct astlpc_test ctx = { 0 }; 327 uint8_t msg[MCTP_BTU]; 328 int rc; 329 330 /* Test harness initialisation */ 331 332 network_init(&ctx); 333 334 memset(&msg[0], 0x5a, MCTP_BTU); 335 336 ctx.msg = &msg[0]; 337 ctx.count = 0; 338 mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx); 339 340 /* BMC sends the single-packet message */ 341 rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg)); 342 assert(rc == 0); 343 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 344 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01); 345 346 astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU); 347 348 /* Host receives the single-packet message */ 349 mctp_astlpc_poll(ctx.host.astlpc); 350 assert(ctx.count == 1); 351 352 /* Host returns Rx area ownership to BMC */ 353 assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF)); 354 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02); 355 assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF); 356 357 /* BMC dequeues ownership hand-over and sends the queued packet */ 358 rc = mctp_astlpc_poll(ctx.bmc.astlpc); 359 assert(rc == 0); 360 361 network_destroy(&ctx); 362 } 363 364 static void astlpc_test_host_before_bmc(void) 365 { 366 struct mctp_binding_astlpc_mmio mmio = { 0 }; 367 struct mctp_binding_astlpc *astlpc; 368 uint8_t kcs[2] = { 0 }; 369 struct mctp *mctp; 370 int rc; 371 372 mctp = mctp_init(); 373 assert(mctp); 374 375 /* Inject KCS registers */ 376 mmio.kcs = &kcs; 377 378 /* Initialise the binding */ 379 astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL, 380 &astlpc_direct_mmio_ops, &mmio); 381 382 /* Register the binding to trigger the start-up sequence */ 383 rc = mctp_register_bus(mctp, &astlpc->binding, 8); 384 385 /* Start-up should fail as we haven't initialised the BMC */ 386 assert(rc < 0); 387 388 mctp_astlpc_destroy(astlpc); 389 mctp_destroy(mctp); 390 } 391 392 static void astlpc_test_bad_version(void) 393 { 394 assert(0 == 395 mctp_astlpc_negotiate_version(ASTLPC_VER_BAD, ASTLPC_VER_CUR, 396 ASTLPC_VER_MIN, ASTLPC_VER_CUR)); 397 assert(0 == 398 mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_BAD, 399 ASTLPC_VER_MIN, ASTLPC_VER_CUR)); 400 assert(0 == 401 mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR, 402 ASTLPC_VER_BAD, ASTLPC_VER_CUR)); 403 assert(0 == 404 mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR, 405 ASTLPC_VER_MIN, ASTLPC_VER_BAD)); 406 assert(0 == mctp_astlpc_negotiate_version( 407 ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR, ASTLPC_VER_MIN, 408 ASTLPC_VER_CUR + 1)); 409 assert(0 == mctp_astlpc_negotiate_version( 410 ASTLPC_VER_MIN, ASTLPC_VER_CUR + 1, 411 ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR)); 412 } 413 414 static void astlpc_test_incompatible_versions(void) 415 { 416 assert(0 == mctp_astlpc_negotiate_version( 417 ASTLPC_VER_CUR, ASTLPC_VER_CUR, ASTLPC_VER_CUR + 1, 418 ASTLPC_VER_CUR + 1)); 419 assert(0 == mctp_astlpc_negotiate_version( 420 ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR + 1, 421 ASTLPC_VER_CUR, ASTLPC_VER_CUR)); 422 } 423 424 static void astlpc_test_choose_bmc_ver_cur(void) 425 { 426 assert(2 == mctp_astlpc_negotiate_version(1, 2, 2, 3)); 427 } 428 429 static void astlpc_test_choose_host_ver_cur(void) 430 { 431 assert(2 == mctp_astlpc_negotiate_version(2, 3, 1, 2)); 432 } 433 434 static void astlpc_test_version_host_fails_negotiation(void) 435 { 436 struct astlpc_endpoint bmc, host; 437 struct mctp_lpcmap_hdr *hdr; 438 uint8_t kcs[2] = { 0 }; 439 void *lpc_mem; 440 int rc; 441 442 /* Test harness initialisation */ 443 lpc_mem = calloc(1, 1 * 1024 * 1024); 444 assert(lpc_mem); 445 446 /* BMC initialisation */ 447 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 448 &kcs, lpc_mem); 449 assert(!rc); 450 451 /* Now the BMC is initialised, break its version announcement */ 452 hdr = lpc_mem; 453 hdr->bmc_ver_cur = ASTLPC_VER_BAD; 454 455 /* Host initialisation */ 456 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 457 &kcs, lpc_mem); 458 assert(rc < 0); 459 460 endpoint_destroy(&bmc); 461 endpoint_destroy(&host); 462 free(lpc_mem); 463 } 464 465 static void astlpc_test_version_bmc_fails_negotiation(void) 466 { 467 struct astlpc_endpoint bmc, host; 468 struct mctp_lpcmap_hdr *hdr; 469 uint8_t kcs[2] = { 0 }; 470 void *lpc_mem; 471 int rc; 472 473 /* Test harness initialisation */ 474 lpc_mem = calloc(1, 1 * 1024 * 1024); 475 assert(lpc_mem); 476 477 /* BMC initialisation */ 478 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 479 &kcs, lpc_mem); 480 assert(!rc); 481 482 /* Host initialisation */ 483 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 484 &kcs, lpc_mem); 485 assert(!rc); 486 487 /* Now the host is initialised, break its version announcement */ 488 hdr = lpc_mem; 489 hdr->host_ver_cur = ASTLPC_VER_BAD; 490 491 /* Poll the BMC to detect the broken host version */ 492 mctp_astlpc_poll(bmc.astlpc); 493 assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE)); 494 495 /* Poll the host so it detects failed negotiation */ 496 rc = mctp_astlpc_poll(host.astlpc); 497 assert(rc < 0); 498 499 endpoint_destroy(&bmc); 500 endpoint_destroy(&host); 501 free(lpc_mem); 502 } 503 504 static void astlpc_test_simple_init(void) 505 { 506 struct astlpc_endpoint bmc, host; 507 uint8_t kcs[2] = { 0 }; 508 void *lpc_mem; 509 int rc; 510 511 /* Test harness initialisation */ 512 lpc_mem = calloc(1, 1 * 1024 * 1024); 513 assert(lpc_mem); 514 515 /* BMC initialisation */ 516 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 517 &kcs, lpc_mem); 518 assert(!rc); 519 520 /* Verify the BMC binding was initialised */ 521 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY); 522 523 /* Host initialisation */ 524 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 525 &kcs, lpc_mem); 526 assert(!rc); 527 528 /* Host sends channel init command */ 529 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF); 530 assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x00); 531 532 /* BMC receives host channel init request */ 533 mctp_astlpc_poll(bmc.astlpc); 534 535 /* BMC sends init response */ 536 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF); 537 assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE); 538 assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff); 539 540 /* Host dequeues data */ 541 mctp_astlpc_poll(host.astlpc); 542 543 endpoint_destroy(&bmc); 544 endpoint_destroy(&host); 545 free(lpc_mem); 546 } 547 548 static void astlpc_test_simple_indirect_message_bmc_to_host(void) 549 { 550 struct astlpc_test ctx = { 0 }; 551 uint8_t kcs[2] = { 0 }; 552 uint8_t msg[MCTP_BTU]; 553 int rc; 554 555 ctx.lpc_mem = calloc(1, LPC_WIN_SIZE); 556 assert(ctx.lpc_mem); 557 558 /* Test message data */ 559 memset(&msg[0], 0x5a, MCTP_BTU); 560 561 /* Manually set up the network so we can inject the indirect ops */ 562 563 /* BMC initialisation */ 564 ctx.bmc.mmio.bmc = true; 565 ctx.bmc.mctp = mctp_init(); 566 assert(ctx.bmc.mctp); 567 ctx.bmc.mmio.kcs = &kcs; 568 ctx.bmc.mmio.lpc = ctx.lpc_mem; 569 ctx.bmc.mmio.lpc_size = LPC_WIN_SIZE; 570 ctx.bmc.astlpc = 571 mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, NULL, 572 &astlpc_indirect_mmio_ops, &ctx.bmc.mmio); 573 mctp_register_bus(ctx.bmc.mctp, &ctx.bmc.astlpc->binding, 8); 574 575 /* Host initialisation */ 576 ctx.host.mmio.bmc = false; 577 ctx.host.mctp = mctp_init(); 578 assert(ctx.host.mctp); 579 ctx.host.mmio.kcs = &kcs; 580 ctx.host.mmio.lpc = ctx.lpc_mem; 581 ctx.host.mmio.lpc_size = LPC_WIN_SIZE; 582 ctx.host.astlpc = 583 mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL, 584 &astlpc_indirect_mmio_ops, &ctx.host.mmio); 585 mctp_register_bus(ctx.host.mctp, &ctx.host.astlpc->binding, 9); 586 587 /* BMC processes host channel init request, alerts host */ 588 mctp_astlpc_poll(ctx.bmc.astlpc); 589 590 /* Host dequeues channel init result */ 591 mctp_astlpc_poll(ctx.host.astlpc); 592 593 ctx.msg = &msg[0]; 594 ctx.count = 0; 595 mctp_set_rx_all(ctx.host.mctp, rx_message, &ctx); 596 597 /* BMC sends the single-packet message */ 598 rc = mctp_message_tx(ctx.bmc.mctp, 9, msg, sizeof(msg)); 599 assert(rc == 0); 600 601 /* Host receives the single-packet message */ 602 rc = mctp_astlpc_poll(ctx.host.astlpc); 603 assert(rc == 0); 604 assert(ctx.count == 1); 605 606 /* BMC dequeues ownership hand-over and sends the queued packet */ 607 rc = mctp_astlpc_poll(ctx.bmc.astlpc); 608 assert(rc == 0); 609 610 /* Can still tear-down the network in the normal fashion */ 611 network_destroy(&ctx); 612 } 613 614 static void astlpc_test_host_tx_bmc_gone(void) 615 { 616 struct astlpc_test ctx = { 0 }; 617 uint8_t unwritten[MCTP_BTU]; 618 uint8_t msg[MCTP_BTU]; 619 int rc; 620 621 /* Test harness initialisation */ 622 623 network_init(&ctx); 624 625 memset(&msg[0], 0x5a, sizeof(msg)); 626 memset(&unwritten[0], 0, sizeof(unwritten)); 627 628 ctx.msg = &msg[0]; 629 ctx.count = 0; 630 631 /* Clear bmc-ready */ 632 endpoint_destroy(&ctx.bmc); 633 634 /* Host detects that the BMC is disabled */ 635 mctp_astlpc_poll(ctx.host.astlpc); 636 637 /* Host attempts to send the single-packet message, but is prevented */ 638 rc = mctp_message_tx(ctx.host.mctp, 8, msg, sizeof(msg)); 639 assert(rc == 0); 640 assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF)); 641 astlpc_assert_tx_packet(&ctx.host, &unwritten[0], MCTP_BTU); 642 643 /* BMC comes back */ 644 rc = endpoint_init(&ctx.bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 645 &ctx.kcs, ctx.lpc_mem); 646 assert(!rc); 647 mctp_set_rx_all(ctx.bmc.mctp, rx_message, &ctx); 648 649 /* Host triggers channel init */ 650 mctp_astlpc_poll(ctx.host.astlpc); 651 652 /* BMC handles channel init */ 653 mctp_astlpc_poll(ctx.bmc.astlpc); 654 655 /* Host completes channel init, flushing the Tx queue */ 656 mctp_astlpc_poll(ctx.host.astlpc); 657 658 /* BMC receives the single-packet message */ 659 mctp_astlpc_poll(ctx.bmc.astlpc); 660 assert(ctx.count == 1); 661 662 network_destroy(&ctx); 663 } 664 665 static void astlpc_test_poll_not_ready(void) 666 { 667 struct astlpc_endpoint bmc; 668 uint8_t kcs[2] = { 0 }; 669 void *lpc_mem; 670 int rc; 671 672 /* Test harness initialisation */ 673 lpc_mem = calloc(1, 1 * 1024 * 1024); 674 assert(lpc_mem); 675 676 /* BMC initialisation */ 677 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 678 &kcs, lpc_mem); 679 assert(!rc); 680 681 /* Check for a command despite none present */ 682 rc = mctp_astlpc_poll(bmc.astlpc); 683 684 /* Make sure it doesn't fail */ 685 assert(rc == 0); 686 687 endpoint_destroy(&bmc); 688 free(lpc_mem); 689 } 690 691 static void astlpc_test_undefined_command(void) 692 { 693 struct astlpc_endpoint bmc; 694 uint8_t kcs[2] = { 0 }; 695 void *lpc_mem; 696 int rc; 697 698 /* Test harness initialisation */ 699 lpc_mem = calloc(1, 1 * 1024 * 1024); 700 assert(lpc_mem); 701 702 /* BMC initialisation */ 703 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 704 &kcs, lpc_mem); 705 assert(!rc); 706 707 /* 0x5a isn't legal in v1 or v2 */ 708 kcs[MCTP_ASTLPC_KCS_REG_DATA] = 0x5a; 709 kcs[MCTP_ASTLPC_KCS_REG_STATUS] |= KCS_STATUS_IBF; 710 711 /* Check for a command despite none present */ 712 rc = mctp_astlpc_poll(bmc.astlpc); 713 714 /* Make sure it doesn't fail, bad command should be discarded */ 715 assert(rc == 0); 716 717 endpoint_destroy(&bmc); 718 free(lpc_mem); 719 } 720 721 #define BUFFER_MIN ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(MCTP_BTU)) 722 723 static void astlpc_test_buffers_rx_offset_overflow(void) 724 { 725 struct mctp_astlpc_layout l = { 726 .rx = { UINT32_MAX, BUFFER_MIN }, 727 .tx = { control_size, BUFFER_MIN }, 728 }; 729 730 assert(!mctp_astlpc_layout_validate(&l)); 731 } 732 733 static void astlpc_test_buffers_tx_offset_overflow(void) 734 { 735 struct mctp_astlpc_layout l = { 736 .rx = { control_size, BUFFER_MIN }, 737 .tx = { UINT32_MAX, BUFFER_MIN }, 738 }; 739 740 assert(!mctp_astlpc_layout_validate(&l)); 741 } 742 743 static void astlpc_test_buffers_rx_size_overflow(void) 744 { 745 struct mctp_astlpc_layout l = { 746 .rx = { control_size + BUFFER_MIN, UINT32_MAX }, 747 .tx = { control_size, BUFFER_MIN }, 748 }; 749 750 assert(!mctp_astlpc_layout_validate(&l)); 751 } 752 753 static void astlpc_test_buffers_tx_size_overflow(void) 754 { 755 struct mctp_astlpc_layout l = { 756 .rx = { control_size, BUFFER_MIN }, 757 .tx = { control_size + BUFFER_MIN, UINT32_MAX }, 758 }; 759 760 assert(!mctp_astlpc_layout_validate(&l)); 761 } 762 763 static void astlpc_test_buffers_rx_window_violation(void) 764 { 765 struct mctp_astlpc_layout l = { 766 .rx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN }, 767 .tx = { control_size, BUFFER_MIN }, 768 }; 769 770 assert(!mctp_astlpc_layout_validate(&l)); 771 } 772 773 static void astlpc_test_buffers_tx_window_violation(void) 774 { 775 struct mctp_astlpc_layout l = { 776 .rx = { control_size, BUFFER_MIN }, 777 .tx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN }, 778 }; 779 780 assert(!mctp_astlpc_layout_validate(&l)); 781 } 782 783 static void astlpc_test_buffers_rx_size_fails_btu(void) 784 { 785 struct mctp_astlpc_layout l = { 786 .rx = { control_size, BUFFER_MIN - 1 }, 787 .tx = { control_size + BUFFER_MIN, BUFFER_MIN }, 788 }; 789 790 assert(!mctp_astlpc_layout_validate(&l)); 791 } 792 793 static void astlpc_test_buffers_tx_size_fails_btu(void) 794 { 795 struct mctp_astlpc_layout l = { 796 .rx = { control_size, BUFFER_MIN }, 797 .tx = { control_size + BUFFER_MIN, BUFFER_MIN - 1 }, 798 }; 799 800 assert(!mctp_astlpc_layout_validate(&l)); 801 } 802 803 static void astlpc_test_buffers_overlap_rx_low(void) 804 { 805 struct mctp_astlpc_layout l = { 806 .rx = { control_size, 2 * BUFFER_MIN }, 807 .tx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN }, 808 }; 809 810 assert(!mctp_astlpc_layout_validate(&l)); 811 } 812 813 static void astlpc_test_buffers_overlap_tx_low(void) 814 { 815 struct mctp_astlpc_layout l = { 816 .rx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN }, 817 .tx = { control_size, 2 * BUFFER_MIN }, 818 }; 819 820 assert(!mctp_astlpc_layout_validate(&l)); 821 } 822 823 static void astlpc_test_buffers_overlap_exact(void) 824 { 825 struct mctp_astlpc_layout l = { 826 .rx = { control_size, 2 * BUFFER_MIN }, 827 .tx = { control_size, 2 * BUFFER_MIN }, 828 }; 829 830 assert(!mctp_astlpc_layout_validate(&l)); 831 } 832 833 static void astlpc_test_buffers_overlap_control(void) 834 { 835 struct mctp_astlpc_layout l = { 836 .rx = { 0, BUFFER_MIN }, 837 .tx = { control_size + BUFFER_MIN, BUFFER_MIN }, 838 }; 839 840 assert(!mctp_astlpc_layout_validate(&l)); 841 } 842 843 static void astlpc_test_buffers_bad_host_proposal(void) 844 { 845 struct astlpc_endpoint bmc, host; 846 struct mctp_lpcmap_hdr *hdr; 847 uint8_t kcs[2] = { 0 }; 848 void *lpc_mem; 849 int rc; 850 851 /* Test harness initialisation */ 852 lpc_mem = calloc(1, 1 * 1024 * 1024); 853 assert(lpc_mem); 854 855 /* BMC initialisation */ 856 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 857 &kcs, lpc_mem); 858 assert(!rc); 859 860 /* Host initialisation */ 861 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 862 &kcs, lpc_mem); 863 assert(!rc); 864 865 /* 866 * Now that the host has initialised the control area, break 867 * something before polling the BMC 868 */ 869 hdr = lpc_mem; 870 hdr->layout.rx_size = 0; 871 872 mctp_astlpc_poll(bmc.astlpc); 873 874 /* Make sure the BMC has not set the channel to active */ 875 assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE)); 876 877 endpoint_destroy(&host); 878 endpoint_destroy(&bmc); 879 free(lpc_mem); 880 } 881 882 static void astlpc_test_buffers_bad_bmc_proposal(void) 883 { 884 struct astlpc_endpoint bmc, host; 885 struct mctp_lpcmap_hdr *hdr; 886 uint8_t kcs[2] = { 0 }; 887 void *lpc_mem; 888 int rc; 889 890 /* Test harness initialisation */ 891 lpc_mem = calloc(1, 1 * 1024 * 1024); 892 assert(lpc_mem); 893 894 /* BMC initialisation */ 895 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 896 &kcs, lpc_mem); 897 assert(!rc); 898 899 /* 900 * Now that the BMC has initialised the control area, break something 901 * before initialising the host 902 */ 903 hdr = lpc_mem; 904 hdr->layout.rx_size = 0; 905 906 /* Host initialisation: Fails due to bad layout */ 907 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 908 &kcs, lpc_mem); 909 assert(rc < 0); 910 911 endpoint_destroy(&host); 912 endpoint_destroy(&bmc); 913 free(lpc_mem); 914 } 915 916 static void astlpc_test_buffers_bad_bmc_negotiation(void) 917 { 918 struct astlpc_endpoint bmc, host; 919 struct mctp_lpcmap_hdr *hdr; 920 uint8_t kcs[2] = { 0 }; 921 void *lpc_mem; 922 int rc; 923 924 /* Test harness initialisation */ 925 lpc_mem = calloc(1, 1 * 1024 * 1024); 926 assert(lpc_mem); 927 928 /* BMC initialisation */ 929 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, 930 &kcs, lpc_mem); 931 assert(!rc); 932 933 /* Host initialisation */ 934 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, 935 &kcs, lpc_mem); 936 assert(!rc); 937 938 mctp_astlpc_poll(bmc.astlpc); 939 940 /* 941 * Now that the BMC has initialised the control area, break something 942 * before polling the host 943 */ 944 hdr = lpc_mem; 945 hdr->layout.rx_size = 0; 946 947 rc = mctp_astlpc_poll(host.astlpc); 948 assert(rc < 0); 949 950 endpoint_destroy(&host); 951 endpoint_destroy(&bmc); 952 free(lpc_mem); 953 } 954 955 static void astlpc_test_buffers_bad_host_init(void) 956 { 957 struct astlpc_endpoint host; 958 uint8_t kcs[2] = { 0 }; 959 void *lpc_mem; 960 int rc; 961 962 /* Test harness initialisation */ 963 lpc_mem = calloc(1, 1 * 1024 * 1024); 964 assert(lpc_mem); 965 966 host.mctp = mctp_init(); 967 assert(host.mctp); 968 host.mmio.kcs = &kcs; 969 host.mmio.bmc = false; 970 971 /* Set the MTU to 0 to provoke a failure */ 972 host.astlpc = 973 mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, 0, lpc_mem, 974 &astlpc_direct_mmio_ops, &host.mmio); 975 976 rc = mctp_register_bus(host.mctp, &host.astlpc->binding, 8); 977 assert(rc < 0); 978 979 mctp_astlpc_destroy(host.astlpc); 980 mctp_destroy(host.mctp); 981 free(lpc_mem); 982 } 983 984 static void astlpc_test_negotiate_increased_mtu(void) 985 { 986 struct astlpc_endpoint bmc, host; 987 uint8_t kcs[2] = { 0 }; 988 void *lpc_mem; 989 int rc; 990 991 /* Test harness initialisation */ 992 lpc_mem = calloc(1, 1 * 1024 * 1024); 993 assert(lpc_mem); 994 995 /* BMC initialisation */ 996 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU, 997 &kcs, lpc_mem); 998 assert(!rc); 999 1000 /* Host initialisation */ 1001 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 1002 2 * MCTP_BTU, &kcs, lpc_mem); 1003 assert(!rc); 1004 1005 rc = mctp_astlpc_poll(bmc.astlpc); 1006 assert(rc == 0); 1007 1008 rc = mctp_astlpc_poll(host.astlpc); 1009 assert(rc == 0); 1010 1011 endpoint_destroy(&host); 1012 endpoint_destroy(&bmc); 1013 free(lpc_mem); 1014 } 1015 1016 static void astlpc_test_negotiate_mtu_low_high(void) 1017 { 1018 struct astlpc_endpoint bmc, host; 1019 uint8_t kcs[2] = { 0 }; 1020 void *lpc_mem; 1021 int rc; 1022 1023 /* Test harness initialisation */ 1024 lpc_mem = calloc(1, 1 * 1024 * 1024); 1025 assert(lpc_mem); 1026 1027 /* BMC initialisation */ 1028 rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU, 1029 &kcs, lpc_mem); 1030 assert(!rc); 1031 1032 /* Host initialisation with low MTU */ 1033 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 1034 2 * MCTP_BTU, &kcs, lpc_mem); 1035 assert(!rc); 1036 1037 /* Process low MTU proposal */ 1038 rc = mctp_astlpc_poll(bmc.astlpc); 1039 assert(rc == 0); 1040 1041 /* Accept low MTU proposal */ 1042 rc = mctp_astlpc_poll(host.astlpc); 1043 assert(rc == 0); 1044 1045 assert(host.astlpc->layout.rx.size == 1046 ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(2 * MCTP_BTU))); 1047 1048 /* Tear-down the host so we can bring up a new one */ 1049 endpoint_destroy(&host); 1050 1051 /* 1052 * Bring up a new host endpoint with a higher MTU than we previously 1053 * negotiated 1054 */ 1055 rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 1056 3 * MCTP_BTU, &kcs, lpc_mem); 1057 assert(!rc); 1058 1059 /* Process high MTU proposal */ 1060 rc = mctp_astlpc_poll(bmc.astlpc); 1061 assert(rc == 0); 1062 1063 /* Accept high MTU proposal */ 1064 rc = mctp_astlpc_poll(host.astlpc); 1065 assert(rc == 0); 1066 1067 assert(host.astlpc->layout.rx.size == 1068 ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(3 * MCTP_BTU))); 1069 1070 endpoint_destroy(&host); 1071 endpoint_destroy(&bmc); 1072 free(lpc_mem); 1073 } 1074 1075 static void astlpc_test_send_large_packet(void) 1076 { 1077 struct astlpc_endpoint *bmc, *host; 1078 struct astlpc_test ctx; 1079 uint8_t kcs[2] = { 0 }; 1080 void *lpc_mem; 1081 int rc; 1082 1083 host = &ctx.host; 1084 bmc = &ctx.bmc; 1085 1086 /* Test harness initialisation */ 1087 lpc_mem = calloc(1, 1 * 1024 * 1024); 1088 assert(lpc_mem); 1089 1090 /* BMC initialisation */ 1091 rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 8192, &kcs, 1092 lpc_mem); 1093 assert(!rc); 1094 1095 /* Host initialisation */ 1096 rc = endpoint_init(host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 8192, &kcs, 1097 lpc_mem); 1098 assert(!rc); 1099 1100 ctx.count = 0; 1101 mctp_set_rx_all(bmc->mctp, rx_message, &ctx); 1102 1103 rc = mctp_astlpc_poll(bmc->astlpc); 1104 assert(rc == 0); 1105 1106 rc = mctp_astlpc_poll(host->astlpc); 1107 assert(rc == 0); 1108 1109 ctx.msg = malloc(2 * MCTP_BODY_SIZE(8192)); 1110 assert(ctx.msg); 1111 1112 memset(ctx.msg, 0x5a, 2 * MCTP_BODY_SIZE(8192)); 1113 1114 rc = mctp_message_tx(host->mctp, 8, ctx.msg, 2 * MCTP_BODY_SIZE(8192)); 1115 assert(rc == 0); 1116 rc = mctp_astlpc_poll(bmc->astlpc); 1117 assert(rc == 0); 1118 rc = mctp_astlpc_poll(host->astlpc); 1119 assert(rc == 0); 1120 rc = mctp_astlpc_poll(bmc->astlpc); 1121 assert(rc == 0); 1122 rc = mctp_astlpc_poll(host->astlpc); 1123 assert(rc == 0); 1124 1125 assert(ctx.count == 1); 1126 1127 free(ctx.msg); 1128 endpoint_destroy(host); 1129 endpoint_destroy(bmc); 1130 free(lpc_mem); 1131 } 1132 1133 static void astlpc_test_tx_before_channel_init(void) 1134 { 1135 struct astlpc_endpoint *bmc; 1136 struct astlpc_test ctx; 1137 uint8_t kcs[2] = { 0 }; 1138 uint8_t msg[MCTP_BTU]; 1139 void *lpc_mem; 1140 int rc; 1141 1142 bmc = &ctx.bmc; 1143 1144 /* Test harness initialisation */ 1145 lpc_mem = calloc(1, 1 * 1024 * 1024); 1146 assert(lpc_mem); 1147 1148 /* BMC initialisation */ 1149 rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 0, &kcs, 1150 lpc_mem); 1151 assert(!rc); 1152 1153 memset(msg, '\0', sizeof(msg)); 1154 1155 /* 1156 * There was once a bug where the calculated MTU was 0 and the 1157 * packetisation loop in mctp_message_tx_on_bus() allocated all the 1158 * memory. Catch the bug and avoid OOMing the test machine by 1159 * terminating after a period long enough to packetise the message. 1160 */ 1161 alarm(1); 1162 mctp_message_tx(bmc->mctp, 9, msg, sizeof(msg)); 1163 alarm(0); 1164 1165 endpoint_destroy(bmc); 1166 free(lpc_mem); 1167 } 1168 1169 /* clang-format off */ 1170 #define TEST_CASE(test) { #test, test } 1171 static const struct { 1172 const char *name; 1173 void (*test)(void); 1174 } astlpc_tests[] = { 1175 TEST_CASE(astlpc_test_simple_init), 1176 TEST_CASE(astlpc_test_bad_version), 1177 TEST_CASE(astlpc_test_incompatible_versions), 1178 TEST_CASE(astlpc_test_choose_bmc_ver_cur), 1179 TEST_CASE(astlpc_test_choose_host_ver_cur), 1180 TEST_CASE(astlpc_test_version_host_fails_negotiation), 1181 TEST_CASE(astlpc_test_version_bmc_fails_negotiation), 1182 TEST_CASE(astlpc_test_host_before_bmc), 1183 TEST_CASE(astlpc_test_simple_message_bmc_to_host), 1184 TEST_CASE(astlpc_test_simple_message_host_to_bmc), 1185 TEST_CASE(astlpc_test_packetised_message_bmc_to_host), 1186 TEST_CASE(astlpc_test_simple_indirect_message_bmc_to_host), 1187 TEST_CASE(astlpc_test_host_tx_bmc_gone), 1188 TEST_CASE(astlpc_test_poll_not_ready), 1189 TEST_CASE(astlpc_test_undefined_command), 1190 TEST_CASE(astlpc_test_buffers_rx_offset_overflow), 1191 TEST_CASE(astlpc_test_buffers_tx_offset_overflow), 1192 TEST_CASE(astlpc_test_buffers_rx_size_overflow), 1193 TEST_CASE(astlpc_test_buffers_tx_size_overflow), 1194 TEST_CASE(astlpc_test_buffers_rx_window_violation), 1195 TEST_CASE(astlpc_test_buffers_tx_window_violation), 1196 TEST_CASE(astlpc_test_buffers_rx_size_fails_btu), 1197 TEST_CASE(astlpc_test_buffers_tx_size_fails_btu), 1198 TEST_CASE(astlpc_test_buffers_overlap_rx_low), 1199 TEST_CASE(astlpc_test_buffers_overlap_tx_low), 1200 TEST_CASE(astlpc_test_buffers_bad_host_proposal), 1201 TEST_CASE(astlpc_test_buffers_bad_bmc_proposal), 1202 TEST_CASE(astlpc_test_buffers_bad_bmc_negotiation), 1203 TEST_CASE(astlpc_test_buffers_overlap_exact), 1204 TEST_CASE(astlpc_test_buffers_overlap_control), 1205 TEST_CASE(astlpc_test_buffers_bad_host_init), 1206 TEST_CASE(astlpc_test_negotiate_increased_mtu), 1207 TEST_CASE(astlpc_test_negotiate_mtu_low_high), 1208 TEST_CASE(astlpc_test_send_large_packet), 1209 TEST_CASE(astlpc_test_tx_before_channel_init), 1210 }; 1211 /* clang-format on */ 1212 1213 #ifndef BUILD_ASSERT 1214 #define BUILD_ASSERT(x) \ 1215 do { \ 1216 (void)sizeof(char[0 - (!(x))]); \ 1217 } while (0) 1218 #endif 1219 1220 int main(void) 1221 { 1222 size_t i; 1223 1224 mctp_set_log_stdio(MCTP_LOG_DEBUG); 1225 1226 BUILD_ASSERT(ARRAY_SIZE(astlpc_tests) < SIZE_MAX); 1227 for (i = 0; i < ARRAY_SIZE(astlpc_tests); i++) { 1228 mctp_prlog(MCTP_LOG_DEBUG, "begin: %s", astlpc_tests[i].name); 1229 astlpc_tests[i].test(); 1230 mctp_prlog(MCTP_LOG_DEBUG, "end: %s\n", astlpc_tests[i].name); 1231 } 1232 1233 return 0; 1234 } 1235