xref: /openbmc/libmctp/tests/test_astlpc.c (revision 3ef47785)
1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */
2 
3 #ifdef HAVE_CONFIG_H
4 #include "config.h"
5 #endif
6 
7 #define ASTLPC_VER_CUR 3
8 #include "astlpc.c"
9 
10 #ifdef pr_fmt
11 #undef pr_fmt
12 #define pr_fmt(x) "test: " x
13 #endif
14 
15 #include "compiler.h"
16 #include "container_of.h"
17 #include "libmctp-astlpc.h"
18 #include "libmctp-log.h"
19 
20 #ifdef NDEBUG
21 #undef NDEBUG
22 #endif
23 
24 #include <assert.h>
25 #include <limits.h>
26 #include <stdint.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/random.h>
31 #include <unistd.h>
32 
33 #ifndef ARRAY_SIZE
34 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
35 #endif
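
/*
 * Notes on the emulated KCS interface used throughout these tests, inferred
 * from the assertions below and from astlpc.c (included above):
 *
 *  - kcs[MCTP_ASTLPC_KCS_REG_DATA] (index 0) models the shared data register
 *    and kcs[MCTP_ASTLPC_KCS_REG_STATUS] (index 1) the shared status register.
 *  - Data register values seen in the tests: 0x00 is the host's channel-init
 *    request, 0x01 announces a packet placed in the sender's Tx area, 0x02
 *    returns buffer ownership after reception, and 0xff is what the BMC
 *    writes when announcing that channel init is complete.
 */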
36 
37 struct mctp_binding_astlpc_mmio {
38 	struct mctp_binding_astlpc astlpc;
39 	bool bmc;
40 
41 	uint8_t (*kcs)[2];
42 
43 	size_t lpc_size;
44 	uint8_t *lpc;
45 };
46 
47 struct astlpc_endpoint {
48 	struct mctp_binding_astlpc_mmio mmio;
49 	struct mctp_binding_astlpc *astlpc;
50 	struct mctp *mctp;
51 };
52 
53 struct astlpc_test {
54 	struct astlpc_endpoint bmc;
55 	struct astlpc_endpoint host;
56 	uint8_t kcs[2];
57 	uint8_t *lpc_mem;
58 
59 	void *msg;
60 	uint8_t count;
61 };
62 
63 #define binding_to_mmio(b)                                                     \
64 	container_of(b, struct mctp_binding_astlpc_mmio, astlpc)
65 
66 static int mctp_astlpc_mmio_kcs_read(void *data,
67 				     enum mctp_binding_astlpc_kcs_reg reg,
68 				     uint8_t *val)
69 {
70 	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);
71 
72 	*val = (*mmio->kcs)[reg];
73 
74 	mctp_prdebug("%s: 0x%hhx from %s", __func__, *val,
75 		     reg ? "status" : "data");
76 
77 	if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
78 		uint8_t flag = mmio->bmc ? KCS_STATUS_IBF : KCS_STATUS_OBF;
79 		(*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] &= ~flag;
80 	}
81 
82 	return 0;
83 }
84 
85 static int mctp_astlpc_mmio_kcs_write(void *data,
86 				      enum mctp_binding_astlpc_kcs_reg reg,
87 				      uint8_t val)
88 {
89 	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);
90 	uint8_t *regp;
91 
92 	assert(reg == MCTP_ASTLPC_KCS_REG_DATA ||
93 	       reg == MCTP_ASTLPC_KCS_REG_STATUS);
94 
95 	if (reg == MCTP_ASTLPC_KCS_REG_DATA) {
96 		uint8_t flag = mmio->bmc ? KCS_STATUS_OBF : KCS_STATUS_IBF;
97 		(*mmio->kcs)[MCTP_ASTLPC_KCS_REG_STATUS] |= flag;
98 	}
99 
100 	regp = &(*mmio->kcs)[reg];
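	/*
	 * Emulate the hardware-managed status bits: bits 1 (IBF) and 3
	 * (presumably the KCS C/D flag) of the written value are masked off,
	 * and OBF (bit 0) is retained only when it is set in both the written
	 * value and the current register contents, so a status write can
	 * clear OBF but never set it.
	 */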
101 	if (reg == MCTP_ASTLPC_KCS_REG_STATUS)
102 		*regp = (val & ~0xbU) | (val & *regp & 1);
103 	else
104 		*regp = val;
105 
106 	mctp_prdebug("%s: 0x%hhx to %s", __func__, val,
107 		     reg ? "status" : "data");
108 
109 	return 0;
110 }
111 
112 static const struct mctp_binding_astlpc_ops astlpc_direct_mmio_ops = {
113 	.kcs_read = mctp_astlpc_mmio_kcs_read,
114 	.kcs_write = mctp_astlpc_mmio_kcs_write,
115 };
116 
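/*
 * The "indirect" ops constructed below add LPC window accessors for tests
 * that initialise the binding without a direct mapping (passing NULL for the
 * LPC region); data is then copied through the harness's lpc buffer. The
 * "direct" ops above rely on the binding accessing the mapping itself.
 */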
117 int mctp_astlpc_mmio_lpc_read(void *data, void *buf, long offset, size_t len)
118 {
119 	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);
120 
121 	mctp_prdebug("%s: %zu bytes from 0x%lx", __func__, len, offset);
122 
123 	assert(offset >= 0L);
124 	assert(offset + len <= mmio->lpc_size);
125 
126 	memcpy(buf, mmio->lpc + offset, len);
127 
128 	return 0;
129 }
130 
131 int mctp_astlpc_mmio_lpc_write(void *data, const void *buf, long offset,
132 			       size_t len)
133 {
134 	struct mctp_binding_astlpc_mmio *mmio = binding_to_mmio(data);
135 
136 	mctp_prdebug("%s: %zu bytes to 0x%lx", __func__, len, offset);
137 
138 	assert(offset >= 0L);
139 	assert(offset + len <= mmio->lpc_size);
140 
141 	memcpy(mmio->lpc + offset, buf, len);
142 
143 	return 0;
144 }
145 
146 static const struct mctp_binding_astlpc_ops astlpc_indirect_mmio_ops = {
147 	.kcs_read = mctp_astlpc_mmio_kcs_read,
148 	.kcs_write = mctp_astlpc_mmio_kcs_write,
149 	.lpc_read = mctp_astlpc_mmio_lpc_read,
150 	.lpc_write = mctp_astlpc_mmio_lpc_write,
151 };
152 
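/*
 * Common Rx handler for the tests: check that the delivered message matches
 * the expected buffer in the test context and count how many messages arrived.
 */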
153 static void astlpc_test_rx_message(uint8_t eid __unused,
154 				   bool tag_owner __unused,
155 				   uint8_t msg_tag __unused,
156 				   void *data, void *msg, size_t len)
157 {
158 	struct astlpc_test *test = data;
159 
160 	mctp_prdebug("MCTP message received: msg: %p, len %zu", msg, len);
161 
162 	assert(len > 0);
163 	assert(msg);
164 	assert(test);
165 	assert(test->msg);
166 	assert(!memcmp(test->msg, msg, len));
167 
168 	test->count++;
169 }
170 
171 static int endpoint_init(struct astlpc_endpoint *ep, mctp_eid_t eid,
172 			 uint8_t mode, uint32_t mtu, uint8_t (*kcs)[2],
173 			 void *lpc_mem)
174 {
175 	/*
176 	 * Configure the direction of the KCS interface so we know whether to
177 	 * set or clear IBF or OBF on writes or reads.
178 	 */
179 	ep->mmio.bmc = (mode == MCTP_BINDING_ASTLPC_MODE_BMC);
180 
181 	ep->mctp = mctp_init();
182 	assert(ep->mctp);
183 
184 	/* Inject KCS registers */
185 	ep->mmio.kcs = kcs;
186 
187 	/* Initialise the binding */
188 	ep->astlpc = mctp_astlpc_init(mode, mtu, lpc_mem,
189 				      &astlpc_direct_mmio_ops, &ep->mmio);
190 	assert(ep->astlpc);
191 
192 	return mctp_register_bus(ep->mctp, &ep->astlpc->binding, eid);
193 }
194 
195 static void endpoint_destroy(struct astlpc_endpoint *ep)
196 {
197 	mctp_astlpc_destroy(ep->astlpc);
198 	mctp_destroy(ep->mctp);
199 }
200 
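/*
 * Bring up a two-endpoint network: a BMC (EID 8) and a host (EID 9) sharing a
 * single pair of KCS registers and a 1 MiB LPC window, then complete the
 * channel-init handshake so both bindings are ready to exchange packets.
 */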
201 static void network_init(struct astlpc_test *ctx)
202 {
203 	int rc;
204 
205 	ctx->lpc_mem = calloc(1, 1 * 1024 * 1024);
206 	assert(ctx->lpc_mem);
207 
208 	/* BMC initialisation */
209 	rc = endpoint_init(&ctx->bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
210 			   &ctx->kcs, ctx->lpc_mem);
211 	assert(!rc);
212 	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);
213 
214 	/* Host initialisation */
215 	rc = endpoint_init(&ctx->host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
216 			   MCTP_BTU, &ctx->kcs, ctx->lpc_mem);
217 	assert(!rc);
218 
219 	/* BMC processes host channel init request, alerts host */
220 	mctp_astlpc_poll(ctx->bmc.astlpc);
221 	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_STATUS] &
222 	       KCS_STATUS_CHANNEL_ACTIVE);
223 	assert(ctx->kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);
224 
225 	/* Host dequeues channel init result */
226 	mctp_astlpc_poll(ctx->host.astlpc);
227 }
228 
229 static void network_destroy(struct astlpc_test *ctx)
230 {
231 	endpoint_destroy(&ctx->bmc);
232 	endpoint_destroy(&ctx->host);
233 	free(ctx->lpc_mem);
234 }
235 
236 static void astlpc_assert_tx_packet(struct astlpc_endpoint *src,
237 				    const void *expected, size_t len)
238 {
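	/*
	 * The Tx area is assumed to start with a 4-byte frame length followed
	 * by the packet; skipping a further 4 bytes of MCTP header yields the
	 * start of the message body that callers compare against.
	 */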
239 	const size_t tx_body = src->astlpc->layout.tx.offset + 4 + 4;
240 	const void *test = ((char *)src->astlpc->lpc_map) + tx_body;
241 	assert(!memcmp(test, expected, len));
242 }
243 
244 static void astlpc_test_packetised_message_bmc_to_host(void)
245 {
246 	struct astlpc_test ctx = { 0 };
247 	uint8_t msg[2 * MCTP_BTU];
248 	int rc;
249 
250 	/* Test harness initialisation */
251 
252 	network_init(&ctx);
253 
254 	memset(&msg[0], 0x5a, MCTP_BTU);
255 	memset(&msg[MCTP_BTU], 0xa5, MCTP_BTU);
256 
257 	ctx.msg = &msg[0];
258 	ctx.count = 0;
259 	mctp_set_rx_all(ctx.host.mctp, astlpc_test_rx_message, &ctx);
260 
261 	/* BMC sends a message */
262 	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, 0, msg,
263 			     sizeof(msg));
264 	assert(rc == 0);
265 
266 	/* Host receives the first packet */
267 	mctp_astlpc_poll(ctx.host.astlpc);
268 
269 	/* BMC dequeues ownership hand-over and sends the queued packet */
270 	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
271 	assert(rc == 0);
272 
273 	/* The next packet now awaits the host in the BMC's Tx area */
274 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
275 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);
276 
277 	astlpc_assert_tx_packet(&ctx.bmc, &msg[MCTP_BTU], MCTP_BTU);
278 
279 	/* Host receives final packet */
280 	mctp_astlpc_poll(ctx.host.astlpc);
281 	assert(ctx.count == 1);
282 
283 	network_destroy(&ctx);
284 }
285 
286 static void astlpc_test_simple_message_host_to_bmc(void)
287 {
288 	struct astlpc_test ctx = { 0 };
289 	uint8_t msg[MCTP_BTU];
290 	uint8_t tag = 0;
291 	int rc;
292 
293 	/* Test harness initialisation */
294 
295 	network_init(&ctx);
296 
297 	memset(&msg[0], 0xa5, MCTP_BTU);
298 
299 	ctx.msg = &msg[0];
300 	ctx.count = 0;
301 	mctp_set_rx_all(ctx.bmc.mctp, astlpc_test_rx_message, &ctx);
302 
303 	/* Host sends the single-packet message */
304 	rc = mctp_message_tx(ctx.host.mctp, 8, MCTP_MESSAGE_TO_DST, tag, msg,
305 			     sizeof(msg));
306 	assert(rc == 0);
307 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
308 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);
309 
310 	astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU);
311 
312 	/* BMC receives the single-packet message */
313 	mctp_astlpc_poll(ctx.bmc.astlpc);
314 	assert(ctx.count == 1);
315 
316 	/* BMC returns Tx area ownership to Host */
317 	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF));
318 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
319 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
320 
321 	/* Host dequeues ownership hand-over and sends the queued packet */
322 	rc = mctp_astlpc_poll(ctx.host.astlpc);
323 	assert(rc == 0);
324 
325 	network_destroy(&ctx);
326 }
327 
328 static void astlpc_test_simple_message_bmc_to_host(void)
329 {
330 	struct astlpc_test ctx = { 0 };
331 	uint8_t msg[MCTP_BTU];
332 	uint8_t tag = 0;
333 	int rc;
334 
335 	/* Test harness initialisation */
336 
337 	network_init(&ctx);
338 
339 	memset(&msg[0], 0x5a, MCTP_BTU);
340 
341 	ctx.msg = &msg[0];
342 	ctx.count = 0;
343 	mctp_set_rx_all(ctx.host.mctp, astlpc_test_rx_message, &ctx);
344 
345 	/* BMC sends the single-packet message */
346 	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
347 			     sizeof(msg));
348 	assert(rc == 0);
349 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
350 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);
351 
352 	astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU);
353 
354 	/* Host receives the single-packet message */
355 	mctp_astlpc_poll(ctx.host.astlpc);
356 	assert(ctx.count == 1);
357 
358 	/* Host returns Rx area ownership to BMC */
359 	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
360 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
361 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
362 
363 	/* BMC dequeues ownership hand-over and sends the queued packet */
364 	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
365 	assert(rc == 0);
366 
367 	network_destroy(&ctx);
368 }
369 
370 static void astlpc_test_host_before_bmc(void)
371 {
372 	struct mctp_binding_astlpc_mmio mmio = { 0 };
373 	struct mctp_binding_astlpc *astlpc;
374 	uint8_t kcs[2] = { 0 };
375 	struct mctp *mctp;
376 	int rc;
377 
378 	mctp = mctp_init();
379 	assert(mctp);
380 
381 	/* Inject KCS registers */
382 	mmio.kcs = &kcs;
383 
384 	/* Initialise the binding */
385 	astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
386 				  &astlpc_direct_mmio_ops, &mmio);
387 
388 	/* Register the binding to trigger the start-up sequence */
389 	rc = mctp_register_bus(mctp, &astlpc->binding, 8);
390 
391 	/* Start-up should fail as we haven't initialised the BMC */
392 	assert(rc < 0);
393 
394 	mctp_astlpc_destroy(astlpc);
395 	mctp_destroy(mctp);
396 }
397 
398 static void astlpc_test_bad_version(void)
399 {
400 	assert(0 ==
401 	       mctp_astlpc_negotiate_version(ASTLPC_VER_BAD, ASTLPC_VER_CUR,
402 					     ASTLPC_VER_MIN, ASTLPC_VER_CUR));
403 	assert(0 ==
404 	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_BAD,
405 					     ASTLPC_VER_MIN, ASTLPC_VER_CUR));
406 	assert(0 ==
407 	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
408 					     ASTLPC_VER_BAD, ASTLPC_VER_CUR));
409 	assert(0 ==
410 	       mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
411 					     ASTLPC_VER_MIN, ASTLPC_VER_BAD));
412 	assert(0 == mctp_astlpc_negotiate_version(
413 			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR, ASTLPC_VER_MIN,
414 			    ASTLPC_VER_CUR + 1));
415 	assert(0 == mctp_astlpc_negotiate_version(
416 			    ASTLPC_VER_MIN, ASTLPC_VER_CUR + 1,
417 			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR));
418 }
419 
420 static void astlpc_test_incompatible_versions(void)
421 {
422 	assert(0 == mctp_astlpc_negotiate_version(
423 			    ASTLPC_VER_CUR, ASTLPC_VER_CUR, ASTLPC_VER_CUR + 1,
424 			    ASTLPC_VER_CUR + 1));
425 	assert(0 == mctp_astlpc_negotiate_version(
426 			    ASTLPC_VER_CUR + 1, ASTLPC_VER_CUR + 1,
427 			    ASTLPC_VER_CUR, ASTLPC_VER_CUR));
428 }
429 
430 static void astlpc_test_choose_bmc_ver_cur(void)
431 {
432 	assert(2 == mctp_astlpc_negotiate_version(1, 2, 2, 3));
433 }
434 
435 static void astlpc_test_choose_host_ver_cur(void)
436 {
437 	assert(2 == mctp_astlpc_negotiate_version(2, 3, 1, 2));
438 }
439 
440 static void astlpc_test_version_host_fails_negotiation(void)
441 {
442 	struct astlpc_endpoint bmc, host;
443 	struct mctp_lpcmap_hdr *hdr;
444 	uint8_t kcs[2] = { 0 };
445 	void *lpc_mem;
446 	int rc;
447 
448 	/* Test harness initialisation */
449 	lpc_mem = calloc(1, 1 * 1024 * 1024);
450 	assert(lpc_mem);
451 
452 	/* BMC initialisation */
453 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
454 			   &kcs, lpc_mem);
455 	assert(!rc);
456 
457 	/* Now that the BMC is initialised, break its version announcement */
458 	hdr = lpc_mem;
459 	hdr->bmc_ver_cur = ASTLPC_VER_BAD;
460 
461 	/* Host initialisation */
462 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
463 			   &kcs, lpc_mem);
464 	assert(rc < 0);
465 
466 	endpoint_destroy(&bmc);
467 	endpoint_destroy(&host);
468 	free(lpc_mem);
469 }
470 
471 static void astlpc_test_version_bmc_fails_negotiation(void)
472 {
473 	struct astlpc_endpoint bmc, host;
474 	struct mctp_lpcmap_hdr *hdr;
475 	uint8_t kcs[2] = { 0 };
476 	void *lpc_mem;
477 	int rc;
478 
479 	/* Test harness initialisation */
480 	lpc_mem = calloc(1, 1 * 1024 * 1024);
481 	assert(lpc_mem);
482 
483 	/* BMC initialisation */
484 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
485 			   &kcs, lpc_mem);
486 	assert(!rc);
487 
488 	/* Host initialisation */
489 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
490 			   &kcs, lpc_mem);
491 	assert(!rc);
492 
493 	/* Now that the host is initialised, break its version announcement */
494 	hdr = lpc_mem;
495 	hdr->host_ver_cur = ASTLPC_VER_BAD;
496 
497 	/* Poll the BMC to detect the broken host version */
498 	mctp_astlpc_poll(bmc.astlpc);
499 	assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));
500 
501 	/* Poll the host so it detects failed negotiation */
502 	rc = mctp_astlpc_poll(host.astlpc);
503 	assert(rc < 0);
504 
505 	endpoint_destroy(&bmc);
506 	endpoint_destroy(&host);
507 	free(lpc_mem);
508 }
509 
510 static void astlpc_test_simple_init(void)
511 {
512 	struct astlpc_endpoint bmc, host;
513 	uint8_t kcs[2] = { 0 };
514 	void *lpc_mem;
515 	int rc;
516 
517 	/* Test harness initialisation */
518 	lpc_mem = calloc(1, 1 * 1024 * 1024);
519 	assert(lpc_mem);
520 
521 	/* BMC initialisation */
522 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
523 			   &kcs, lpc_mem);
524 	assert(!rc);
525 
526 	/* Verify the BMC binding was initialised */
527 	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_BMC_READY);
528 
529 	/* Host initialisation */
530 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
531 			   &kcs, lpc_mem);
532 	assert(!rc);
533 
534 	/* Host sends channel init command */
535 	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
536 	assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x00);
537 
538 	/* BMC receives host channel init request */
539 	mctp_astlpc_poll(bmc.astlpc);
540 
541 	/* BMC sends init response */
542 	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
543 	assert(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE);
544 	assert(kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0xff);
545 
546 	/* Host dequeues data */
547 	mctp_astlpc_poll(host.astlpc);
548 
549 	endpoint_destroy(&bmc);
550 	endpoint_destroy(&host);
551 	free(lpc_mem);
552 }
553 
554 static void astlpc_test_simple_indirect_message_bmc_to_host(void)
555 {
556 	struct astlpc_test ctx = { 0 };
557 	uint8_t kcs[2] = { 0 };
558 	uint8_t msg[MCTP_BTU];
559 	uint8_t tag = 0;
560 	int rc;
561 
562 	ctx.lpc_mem = calloc(1, LPC_WIN_SIZE);
563 	assert(ctx.lpc_mem);
564 
565 	/* Test message data */
566 	memset(&msg[0], 0x5a, MCTP_BTU);
567 
568 	/* Manually set up the network so we can inject the indirect ops */
569 
570 	/* BMC initialisation */
571 	ctx.bmc.mmio.bmc = true;
572 	ctx.bmc.mctp = mctp_init();
573 	assert(ctx.bmc.mctp);
574 	ctx.bmc.mmio.kcs = &kcs;
575 	ctx.bmc.mmio.lpc = ctx.lpc_mem;
576 	ctx.bmc.mmio.lpc_size = LPC_WIN_SIZE;
577 	ctx.bmc.astlpc =
578 		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, NULL,
579 				 &astlpc_indirect_mmio_ops, &ctx.bmc.mmio);
580 	mctp_register_bus(ctx.bmc.mctp, &ctx.bmc.astlpc->binding, 8);
581 
582 	/* Host initialisation */
583 	ctx.host.mmio.bmc = false;
584 	ctx.host.mctp = mctp_init();
585 	assert(ctx.host.mctp);
586 	ctx.host.mmio.kcs = &kcs;
587 	ctx.host.mmio.lpc = ctx.lpc_mem;
588 	ctx.host.mmio.lpc_size = LPC_WIN_SIZE;
589 	ctx.host.astlpc =
590 		mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU, NULL,
591 				 &astlpc_indirect_mmio_ops, &ctx.host.mmio);
592 	mctp_register_bus(ctx.host.mctp, &ctx.host.astlpc->binding, 9);
593 
594 	/* BMC processes host channel init request, alerts host */
595 	mctp_astlpc_poll(ctx.bmc.astlpc);
596 
597 	/* Host dequeues channel init result */
598 	mctp_astlpc_poll(ctx.host.astlpc);
599 
600 	ctx.msg = &msg[0];
601 	ctx.count = 0;
602 	mctp_set_rx_all(ctx.host.mctp, astlpc_test_rx_message, &ctx);
603 
604 	/* BMC sends the single-packet message */
605 	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
606 			     sizeof(msg));
607 	assert(rc == 0);
608 
609 	/* Host receives the single-packet message */
610 	rc = mctp_astlpc_poll(ctx.host.astlpc);
611 	assert(rc == 0);
612 	assert(ctx.count == 1);
613 
614 	/* BMC dequeues ownership hand-over and sends the queued packet */
615 	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
616 	assert(rc == 0);
617 
618 	/* Can still tear-down the network in the normal fashion */
619 	network_destroy(&ctx);
620 }
621 
622 static void astlpc_test_host_tx_bmc_gone(void)
623 {
624 	struct astlpc_test ctx = { 0 };
625 	uint8_t unwritten[MCTP_BTU];
626 	uint8_t msg[MCTP_BTU];
627 	uint8_t tag = 0;
628 	int rc;
629 
630 	/* Test harness initialisation */
631 
632 	network_init(&ctx);
633 
634 	memset(&msg[0], 0x5a, sizeof(msg));
635 	memset(&unwritten[0], 0, sizeof(unwritten));
636 
637 	ctx.msg = &msg[0];
638 	ctx.count = 0;
639 
640 	/* Clear bmc-ready */
641 	endpoint_destroy(&ctx.bmc);
642 
643 	/* Host detects that the BMC is disabled */
644 	mctp_astlpc_poll(ctx.host.astlpc);
645 
646 	/* Host attempts to send the single-packet message, but is prevented */
647 	rc = mctp_message_tx(ctx.host.mctp, 8, MCTP_MESSAGE_TO_DST, tag, msg,
648 			     sizeof(msg));
649 	assert(rc == 0);
650 	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
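	/* Nothing was transmitted: the Tx area still holds its calloc'd zeroes */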
651 	astlpc_assert_tx_packet(&ctx.host, &unwritten[0], MCTP_BTU);
652 
653 	/* BMC comes back */
654 	rc = endpoint_init(&ctx.bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
655 			   &ctx.kcs, ctx.lpc_mem);
656 	assert(!rc);
657 	mctp_set_rx_all(ctx.bmc.mctp, astlpc_test_rx_message, &ctx);
658 
659 	/* Host triggers channel init */
660 	mctp_astlpc_poll(ctx.host.astlpc);
661 
662 	/* BMC handles channel init */
663 	mctp_astlpc_poll(ctx.bmc.astlpc);
664 
665 	/* Host completes channel init, flushing the Tx queue */
666 	mctp_astlpc_poll(ctx.host.astlpc);
667 
668 	/* BMC receives the single-packet message */
669 	mctp_astlpc_poll(ctx.bmc.astlpc);
670 	assert(ctx.count == 1);
671 
672 	network_destroy(&ctx);
673 }
674 
675 static void astlpc_test_poll_not_ready(void)
676 {
677 	struct astlpc_endpoint bmc;
678 	uint8_t kcs[2] = { 0 };
679 	void *lpc_mem;
680 	int rc;
681 
682 	/* Test harness initialisation */
683 	lpc_mem = calloc(1, 1 * 1024 * 1024);
684 	assert(lpc_mem);
685 
686 	/* BMC initialisation */
687 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
688 			   &kcs, lpc_mem);
689 	assert(!rc);
690 
691 	/* Check for a command despite none present */
692 	rc = mctp_astlpc_poll(bmc.astlpc);
693 
694 	/* Make sure it doesn't fail */
695 	assert(rc == 0);
696 
697 	endpoint_destroy(&bmc);
698 	free(lpc_mem);
699 }
700 
701 static void astlpc_test_undefined_command(void)
702 {
703 	struct astlpc_endpoint bmc;
704 	uint8_t kcs[2] = { 0 };
705 	void *lpc_mem;
706 	int rc;
707 
708 	/* Test harness initialisation */
709 	lpc_mem = calloc(1, 1 * 1024 * 1024);
710 	assert(lpc_mem);
711 
712 	/* BMC initialisation */
713 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
714 			   &kcs, lpc_mem);
715 	assert(!rc);
716 
717 	/* 0x5a isn't legal in v1 or v2 */
718 	kcs[MCTP_ASTLPC_KCS_REG_DATA] = 0x5a;
719 	kcs[MCTP_ASTLPC_KCS_REG_STATUS] |= KCS_STATUS_IBF;
720 
721 	/* Poll the BMC to process the undefined command */
722 	rc = mctp_astlpc_poll(bmc.astlpc);
723 
724 	/* Make sure it doesn't fail, bad command should be discarded */
725 	/* Make sure it doesn't fail; the bad command should be discarded */
726 
727 	endpoint_destroy(&bmc);
728 	free(lpc_mem);
729 }
730 
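/*
 * The layout-validation tests below run against protocol version 3 (matching
 * the ASTLPC_VER_CUR forced above). BUFFER_MIN is the assumed smallest legal
 * per-direction buffer: one BTU-sized packet plus a 4-byte length field and a
 * 4-byte CRC-32 trailer.
 */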
731 #define BUFFER_MIN (MCTP_PACKET_SIZE(MCTP_BTU) + 4 + 4)
732 static const struct mctp_binding_astlpc astlpc_layout_ctx = {
733 	.proto = &astlpc_protocol_version[3],
734 };
735 
736 static void astlpc_test_buffers_rx_offset_overflow(void)
737 {
738 	struct mctp_astlpc_layout l = {
739 		.rx = { UINT32_MAX, BUFFER_MIN },
740 		.tx = { control_size, BUFFER_MIN },
741 	};
742 
743 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
744 }
745 
746 static void astlpc_test_buffers_tx_offset_overflow(void)
747 {
748 	struct mctp_astlpc_layout l = {
749 		.rx = { control_size, BUFFER_MIN },
750 		.tx = { UINT32_MAX, BUFFER_MIN },
751 	};
752 
753 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
754 }
755 
756 static void astlpc_test_buffers_rx_size_overflow(void)
757 {
758 	struct mctp_astlpc_layout l = {
759 		.rx = { control_size + BUFFER_MIN, UINT32_MAX },
760 		.tx = { control_size, BUFFER_MIN },
761 	};
762 
763 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
764 }
765 
766 static void astlpc_test_buffers_tx_size_overflow(void)
767 {
768 	struct mctp_astlpc_layout l = {
769 		.rx = { control_size, BUFFER_MIN },
770 		.tx = { control_size + BUFFER_MIN, UINT32_MAX },
771 	};
772 
773 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
774 }
775 
776 static void astlpc_test_buffers_rx_window_violation(void)
777 {
778 	struct mctp_astlpc_layout l = {
779 		.rx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
780 		.tx = { control_size, BUFFER_MIN },
781 	};
782 
783 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
784 }
785 
786 static void astlpc_test_buffers_tx_window_violation(void)
787 {
788 	struct mctp_astlpc_layout l = {
789 		.rx = { control_size, BUFFER_MIN },
790 		.tx = { LPC_WIN_SIZE - BUFFER_MIN + 1, BUFFER_MIN },
791 	};
792 
793 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
794 }
795 
796 static void astlpc_test_buffers_rx_size_fails_btu(void)
797 {
798 	struct mctp_astlpc_layout l = {
799 		.rx = { control_size, BUFFER_MIN - 1 },
800 		.tx = { control_size + BUFFER_MIN, BUFFER_MIN },
801 	};
802 
803 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
804 }
805 
806 static void astlpc_test_buffers_tx_size_fails_btu(void)
807 {
808 	struct mctp_astlpc_layout l = {
809 		.rx = { control_size, BUFFER_MIN },
810 		.tx = { control_size + BUFFER_MIN, BUFFER_MIN - 1 },
811 	};
812 
813 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
814 }
815 
816 static void astlpc_test_buffers_overlap_rx_low(void)
817 {
818 	struct mctp_astlpc_layout l = {
819 		.rx = { control_size, 2 * BUFFER_MIN },
820 		.tx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
821 	};
822 
823 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
824 }
825 
826 static void astlpc_test_buffers_overlap_tx_low(void)
827 {
828 	struct mctp_astlpc_layout l = {
829 		.rx = { control_size + BUFFER_MIN, 2 * BUFFER_MIN },
830 		.tx = { control_size, 2 * BUFFER_MIN },
831 	};
832 
833 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
834 }
835 
836 static void astlpc_test_buffers_overlap_exact(void)
837 {
838 	struct mctp_astlpc_layout l = {
839 		.rx = { control_size, 2 * BUFFER_MIN },
840 		.tx = { control_size, 2 * BUFFER_MIN },
841 	};
842 
843 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
844 }
845 
846 static void astlpc_test_buffers_overlap_control(void)
847 {
848 	struct mctp_astlpc_layout l = {
849 		.rx = { 0, BUFFER_MIN },
850 		.tx = { control_size + BUFFER_MIN, BUFFER_MIN },
851 	};
852 
853 	assert(!mctp_astlpc_layout_validate(&astlpc_layout_ctx, &l));
854 }
855 
856 static void astlpc_test_buffers_bad_host_proposal(void)
857 {
858 	struct astlpc_endpoint bmc, host;
859 	struct mctp_lpcmap_hdr *hdr;
860 	uint8_t kcs[2] = { 0 };
861 	void *lpc_mem;
862 	int rc;
863 
864 	/* Test harness initialisation */
865 	lpc_mem = calloc(1, 1 * 1024 * 1024);
866 	assert(lpc_mem);
867 
868 	/* BMC initialisation */
869 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
870 			   &kcs, lpc_mem);
871 	assert(!rc);
872 
873 	/* Host initialisation */
874 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
875 			   &kcs, lpc_mem);
876 	assert(!rc);
877 
878 	/*
879 	 * Now that the host has initialised the control area, break
880 	 * something before polling the BMC
881 	 */
882 	hdr = lpc_mem;
883 	hdr->layout.rx_size = 0;
884 
885 	mctp_astlpc_poll(bmc.astlpc);
886 
887 	/* Make sure the BMC has not set the channel to active */
888 	assert(!(kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_CHANNEL_ACTIVE));
889 
890 	endpoint_destroy(&host);
891 	endpoint_destroy(&bmc);
892 	free(lpc_mem);
893 }
894 
895 static void astlpc_test_buffers_bad_bmc_proposal(void)
896 {
897 	struct astlpc_endpoint bmc, host;
898 	struct mctp_lpcmap_hdr *hdr;
899 	uint8_t kcs[2] = { 0 };
900 	void *lpc_mem;
901 	int rc;
902 
903 	/* Test harness initialisation */
904 	lpc_mem = calloc(1, 1 * 1024 * 1024);
905 	assert(lpc_mem);
906 
907 	/* BMC initialisation */
908 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
909 			   &kcs, lpc_mem);
910 	assert(!rc);
911 
912 	/*
913 	 * Now that the BMC has initialised the control area, break something
914 	 * before initialising the host
915 	 */
916 	hdr = lpc_mem;
917 	hdr->layout.rx_size = 0;
918 
919 	/* Host initialisation: Fails due to bad layout */
920 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
921 			   &kcs, lpc_mem);
922 	assert(rc < 0);
923 
924 	endpoint_destroy(&host);
925 	endpoint_destroy(&bmc);
926 	free(lpc_mem);
927 }
928 
929 static void astlpc_test_buffers_bad_bmc_negotiation(void)
930 {
931 	struct astlpc_endpoint bmc, host;
932 	struct mctp_lpcmap_hdr *hdr;
933 	uint8_t kcs[2] = { 0 };
934 	void *lpc_mem;
935 	int rc;
936 
937 	/* Test harness initialisation */
938 	lpc_mem = calloc(1, 1 * 1024 * 1024);
939 	assert(lpc_mem);
940 
941 	/* BMC initialisation */
942 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
943 			   &kcs, lpc_mem);
944 	assert(!rc);
945 
946 	/* Host initialisation */
947 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, MCTP_BTU,
948 			   &kcs, lpc_mem);
949 	assert(!rc);
950 
951 	mctp_astlpc_poll(bmc.astlpc);
952 
953 	/*
954 	 * Now that the BMC has negotiated the layout, break something
955 	 * before polling the host
956 	 */
957 	hdr = lpc_mem;
958 	hdr->layout.rx_size = 0;
959 
960 	rc = mctp_astlpc_poll(host.astlpc);
961 	assert(rc < 0);
962 
963 	endpoint_destroy(&host);
964 	endpoint_destroy(&bmc);
965 	free(lpc_mem);
966 }
967 
968 static void astlpc_test_buffers_bad_host_init(void)
969 {
970 	struct astlpc_endpoint host;
971 	uint8_t kcs[2] = { 0 };
972 	void *lpc_mem;
973 	int rc;
974 
975 	/* Test harness initialisation */
976 	lpc_mem = calloc(1, 1 * 1024 * 1024);
977 	assert(lpc_mem);
978 
979 	host.mctp = mctp_init();
980 	assert(host.mctp);
981 	host.mmio.kcs = &kcs;
982 	host.mmio.bmc = false;
983 
984 	/* Set the MTU to 0 to provoke a failure */
985 	host.astlpc = mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_HOST, 0,
986 				       lpc_mem, &astlpc_direct_mmio_ops,
987 				       &host.mmio);
988 
989 	rc = mctp_register_bus(host.mctp, &host.astlpc->binding, 8);
990 	assert(rc < 0);
991 
992 	mctp_astlpc_destroy(host.astlpc);
993 	mctp_destroy(host.mctp);
994 	free(lpc_mem);
995 }
996 
997 static void astlpc_test_negotiate_increased_mtu(void)
998 {
999 	struct astlpc_endpoint bmc, host;
1000 	uint8_t kcs[2] = { 0 };
1001 	void *lpc_mem;
1002 	int rc;
1003 
1004 	/* Test harness initialisation */
1005 	lpc_mem = calloc(1, 1 * 1024 * 1024);
1006 	assert(lpc_mem);
1007 
1008 	/* BMC initialisation */
1009 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 3 * MCTP_BTU,
1010 			   &kcs, lpc_mem);
1011 	assert(!rc);
1012 
1013 	/* Host initialisation */
1014 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST,
1015 			   2 * MCTP_BTU, &kcs, lpc_mem);
1016 	assert(!rc);
1017 
1018 	rc = mctp_astlpc_poll(bmc.astlpc);
1019 	assert(rc == 0);
1020 
1021 	rc = mctp_astlpc_poll(host.astlpc);
1022 	assert(rc == 0);
1023 
1024 	endpoint_destroy(&host);
1025 	endpoint_destroy(&bmc);
1026 	free(lpc_mem);
1027 }
1028 
1029 static void astlpc_test_negotiate_mtu_low_high(void)
1030 {
1031 	struct astlpc_endpoint bmc, host;
1032 	uint8_t kcs[2] = { 0 };
1033 	uint32_t bmtu, hmtu;
1034 	void *lpc_mem;
1035 	int rc;
1036 
1037 	/* Test harness initialisation */
1038 	lpc_mem = calloc(1, 1 * 1024 * 1024);
1039 	assert(lpc_mem);
1040 
1041 	/* BMC initialisation */
1042 	bmtu = 3 * MCTP_BTU;
1043 	rc = endpoint_init(&bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, bmtu, &kcs,
1044 			   lpc_mem);
1045 	assert(!rc);
1046 
1047 	/* Host initialisation with low MTU */
1048 	hmtu = 2 * MCTP_BTU;
1049 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
1050 			   lpc_mem);
1051 	assert(!rc);
1052 
1053 	/* Process low MTU proposal */
1054 	rc = mctp_astlpc_poll(bmc.astlpc);
1055 	assert(rc == 0);
1056 
1057 	/* Accept low MTU proposal */
1058 	rc = mctp_astlpc_poll(host.astlpc);
1059 	assert(rc == 0);
1060 
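	/*
	 * Negotiation settles on the smaller of the two proposals, so the
	 * host's Rx buffer should be sized for one maximum-sized packet at
	 * the host's (lower) MTU.
	 */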
1061 	assert(host.astlpc->layout.rx.size ==
1062 	       astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(hmtu)));
1063 
1064 	/* Tear-down the host so we can bring up a new one */
1065 	endpoint_destroy(&host);
1066 
1067 	/*
1068 	 * Bring up a new host endpoint with a higher MTU than we previously
1069 	 * negotiated
1070 	 */
1071 	hmtu = 3 * MCTP_BTU;
1072 	rc = endpoint_init(&host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu, &kcs,
1073 			   lpc_mem);
1074 	assert(!rc);
1075 
1076 	/* Process high MTU proposal */
1077 	rc = mctp_astlpc_poll(bmc.astlpc);
1078 	assert(rc == 0);
1079 
1080 	/* Accept high MTU proposal */
1081 	rc = mctp_astlpc_poll(host.astlpc);
1082 	assert(rc == 0);
1083 
1084 	assert(host.astlpc->layout.rx.size ==
1085 	       astlpc_layout_ctx.proto->packet_size(MCTP_PACKET_SIZE(bmtu)));
1086 
1087 	endpoint_destroy(&host);
1088 	endpoint_destroy(&bmc);
1089 	free(lpc_mem);
1090 }
1091 
1092 static void astlpc_test_send_large_packet(void)
1093 {
1094 	struct astlpc_endpoint *bmc, *host;
1095 	struct astlpc_test ctx;
1096 	uint8_t kcs[2] = { 0 };
1097 	uint8_t tag = 0;
1098 	void *lpc_mem;
1099 	int rc;
1100 
1101 	host = &ctx.host;
1102 	bmc = &ctx.bmc;
1103 
1104 	/* Test harness initialisation */
1105 	lpc_mem = calloc(1, 1 * 1024 * 1024);
1106 	assert(lpc_mem);
1107 
1108 	/* BMC initialisation */
1109 	rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 8192, &kcs,
1110 			   lpc_mem);
1111 	assert(!rc);
1112 
1113 	/* Host initialisation */
1114 	rc = endpoint_init(host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, 8192, &kcs,
1115 			   lpc_mem);
1116 	assert(!rc);
1117 
1118 	ctx.count = 0;
1119 	mctp_set_rx_all(bmc->mctp, astlpc_test_rx_message, &ctx);
1120 
1121 	rc = mctp_astlpc_poll(bmc->astlpc);
1122 	assert(rc == 0);
1123 
1124 	rc = mctp_astlpc_poll(host->astlpc);
1125 	assert(rc == 0);
1126 
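	/*
	 * Build a message that should split into two packets at the 8192-byte
	 * MTU proposed by both endpoints, exercising packetisation and
	 * reassembly well above the baseline BTU.
	 */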
1127 	ctx.msg = malloc(2 * MCTP_BODY_SIZE(8192));
1128 	assert(ctx.msg);
1129 
1130 	memset(ctx.msg, 0x5a, 2 * MCTP_BODY_SIZE(8192));
1131 
1132 	rc = mctp_message_tx(host->mctp, 8, MCTP_MESSAGE_TO_DST, tag, ctx.msg,
1133 			     2 * MCTP_BODY_SIZE(8192));
1134 	assert(rc == 0);
1135 	rc = mctp_astlpc_poll(bmc->astlpc);
1136 	assert(rc == 0);
1137 	rc = mctp_astlpc_poll(host->astlpc);
1138 	assert(rc == 0);
1139 	rc = mctp_astlpc_poll(bmc->astlpc);
1140 	assert(rc == 0);
1141 	rc = mctp_astlpc_poll(host->astlpc);
1142 	assert(rc == 0);
1143 
1144 	assert(ctx.count == 1);
1145 
1146 	free(ctx.msg);
1147 	endpoint_destroy(host);
1148 	endpoint_destroy(bmc);
1149 	free(lpc_mem);
1150 }
1151 
1152 static void astlpc_test_negotiate_mtu_high_low(void)
1153 {
1154 	uint8_t msg[3 * MCTP_BTU] = { 0 };
1155 	struct astlpc_test ctx = { 0 };
1156 	uint32_t bmtu, hmtu;
1157 	uint8_t tag = 0;
1158 	int rc;
1159 
1160 	/* Configure message */
1161 	memset(&msg[0], 0xa5, sizeof(msg));
1162 
1163 	/* Test harness initialisation */
1164 	ctx.lpc_mem = calloc(1, 1 * 1024 * 1024);
1165 	assert(ctx.lpc_mem);
1166 
1167 	/* BMC initialisation */
1168 	bmtu = 3 * MCTP_BTU;
1169 	rc = endpoint_init(&ctx.bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, bmtu,
1170 			   &ctx.kcs, ctx.lpc_mem);
1171 	assert(!rc);
1172 
1173 	/* Host initialisation with an MTU matching the BMC's */
1174 	hmtu = 3 * MCTP_BTU;
1175 	rc = endpoint_init(&ctx.host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu,
1176 			   &ctx.kcs, ctx.lpc_mem);
1177 	assert(!rc);
1178 
1179 	/* Configure host message handler */
1180 	ctx.msg = &msg[0];
1181 	ctx.count = 0;
1182 	mctp_set_rx_all(ctx.host.mctp, astlpc_test_rx_message, &ctx);
1183 
1184 	/* Startup BMC and host interfaces */
1185 	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
1186 	assert(rc == 0);
1187 	rc = mctp_astlpc_poll(ctx.host.astlpc);
1188 	assert(rc == 0);
1189 
1190 	/*
1191 	 * Transmit a message to place a packet on the interface. This hands buffer ownership to the
1192 	 * host and disables the binding's transmit path, plugging the binding's transmit queue until
1193 	 * the host polls to pull the packet off.
1194 	 */
1195 	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_DST, tag, msg,
1196 			     sizeof(msg));
1197 
1198 	/* Leave the packet in place on the interface by not polling the host binding */
1199 
1200 	/*
1201 	 * Transmit another message to force packetisation at the current MTU while the binding is
1202 	 * disabled, leaving the packet(s) in the binding's transmit queue
1203 	 */
1204 	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_DST, tag, msg,
1205 			     sizeof(msg));
1206 
1207 	/* Tear-down the host so we can bring up a new one */
1208 	endpoint_destroy(&ctx.host);
1209 
1210 	/* Bring up a new host endpoint with a lower MTU than we previously negotiated */
1211 	hmtu = 2 * MCTP_BTU;
1212 	rc = endpoint_init(&ctx.host, 9, MCTP_BINDING_ASTLPC_MODE_HOST, hmtu,
1213 			   &ctx.kcs, ctx.lpc_mem);
1214 	assert(!rc);
1215 
1216 	/* Configure host message handler again after reinitialisation */
1217 	ctx.msg = &msg[0];
1218 	ctx.count = 0;
1219 	mctp_set_rx_all(ctx.host.mctp, astlpc_test_rx_message, &ctx);
1220 
1221 	/* Process low MTU proposal */
1222 	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
1223 	assert(rc == 0);
1224 
1225 	/* Accept low MTU proposal */
1226 	rc = mctp_astlpc_poll(ctx.host.astlpc);
1227 	assert(rc == 0);
1228 
1229 	/*
1230 	 * Check that there are no outstanding messages to be received by the host. The message
1231 	 * packetised on the BMC at the larger MTU must be dropped, as it's no longer possible to
1232 	 * transmit those packets
1233 	 */
1234 	rc = mctp_astlpc_poll(ctx.host.astlpc);
1235 	assert(rc == 0);
1236 	assert(ctx.count == 0);
1237 
1238 	/* Transmit another message from the BMC to the host, packetised using the new MTU */
1239 	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_DST, tag, msg,
1240 			     hmtu);
1241 
1242 	/* Check that the most recent BMC transmission is received by the host */
1243 	rc = mctp_astlpc_poll(ctx.host.astlpc);
1244 	assert(rc == 0);
1245 	assert(ctx.count == 1);
1246 
1247 	/* Ensure buffer ownership is returned to the BMC and the BMC Tx queue is processed */
1248 	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
1249 	assert(rc == 0);
1250 
1251 	/* Check that no further messages are propagated to the host */
1252 	rc = mctp_astlpc_poll(ctx.host.astlpc);
1253 	assert(rc == 0);
1254 	assert(ctx.count == 1);
1255 
1256 	endpoint_destroy(&ctx.host);
1257 	endpoint_destroy(&ctx.bmc);
1258 	free(ctx.lpc_mem);
1259 }
1260 
1261 static void astlpc_test_tx_before_channel_init(void)
1262 {
1263 	struct astlpc_endpoint *bmc;
1264 	struct astlpc_test ctx;
1265 	uint8_t kcs[2] = { 0 };
1266 	uint8_t msg[MCTP_BTU];
1267 	uint8_t tag = 0;
1268 	void *lpc_mem;
1269 	int rc;
1270 
1271 	bmc = &ctx.bmc;
1272 
1273 	/* Test harness initialisation */
1274 	lpc_mem = calloc(1, 1 * 1024 * 1024);
1275 	assert(lpc_mem);
1276 
1277 	/* BMC initialisation */
1278 	rc = endpoint_init(bmc, 8, MCTP_BINDING_ASTLPC_MODE_BMC, 0, &kcs,
1279 			   lpc_mem);
1280 	assert(!rc);
1281 
1282 	memset(msg, '\0', sizeof(msg));
1283 
1284 	/*
1285 	 * There was once a bug where the calculated MTU was 0 and the
1286 	 * packetisation loop in mctp_message_tx_on_bus() allocated all the
1287 	 * memory. Catch the bug and avoid OOMing the test machine by
1288 	 * terminating after a period long enough to packetise the message.
1289 	 */
1290 	alarm(1);
1291 	mctp_message_tx(bmc->mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
1292 			sizeof(msg));
1293 	alarm(0);
1294 
1295 	endpoint_destroy(bmc);
1296 	free(lpc_mem);
1297 }
1298 
1299 static void astlpc_test_corrupt_host_tx(void)
1300 {
1301 	struct astlpc_test ctx = { 0 };
1302 	struct mctp_lpcmap_hdr *hdr;
1303 	uint8_t msg[MCTP_BTU];
1304 	uint32_t offset;
1305 	uint8_t tag = 0;
1306 	uint32_t code;
1307 	uint8_t *tlr;
1308 	int rc;
1309 
1310 	/* Test harness initialisation */
1311 
1312 	network_init(&ctx);
1313 
1314 	memset(&msg[0], 0xa5, MCTP_BTU);
1315 
1316 	ctx.msg = &msg[0];
1317 	ctx.count = 0;
1318 	mctp_set_rx_all(ctx.bmc.mctp, astlpc_test_rx_message, &ctx);
1319 
1320 	/* Host sends the single-packet message */
1321 	rc = mctp_message_tx(ctx.host.mctp, 8, MCTP_MESSAGE_TO_DST, tag, msg,
1322 			     sizeof(msg));
1323 	assert(rc == 0);
1324 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
1325 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);
1326 
1327 	astlpc_assert_tx_packet(&ctx.host, &msg[0], MCTP_BTU);
1328 
1329 	/* Corrupt the CRC-32 in the message trailer */
1330 	hdr = (struct mctp_lpcmap_hdr *)ctx.lpc_mem;
1331 	offset = be32toh(hdr->layout.tx_offset);
1332 	tlr = (uint8_t *)&ctx.lpc_mem[offset] + 4 + sizeof(msg);
1333 	memcpy(&code, tlr, sizeof(code));
1334 	code = ~code;
1335 	memcpy(tlr, &code, sizeof(code));
1336 
1337 	/* BMC receives the single-packet message */
1338 	mctp_astlpc_poll(ctx.bmc.astlpc);
1339 	assert(ctx.count == 0);
1340 
1341 	/* BMC returns Tx area ownership to Host */
1342 	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF));
1343 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
1344 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
1345 
1346 	/* Host dequeues ownership hand-over */
1347 	rc = mctp_astlpc_poll(ctx.host.astlpc);
1348 	assert(rc == 0);
1349 
1350 	network_destroy(&ctx);
1351 }
1352 
1353 static void astlpc_test_corrupt_bmc_tx(void)
1354 {
1355 	struct astlpc_test ctx = { 0 };
1356 	struct mctp_lpcmap_hdr *hdr;
1357 	uint8_t msg[MCTP_BTU];
1358 	uint32_t offset;
1359 	uint8_t tag = 0;
1360 	uint32_t code;
1361 	uint8_t *tlr;
1362 	int rc;
1363 
1364 	/* Test harness initialisation */
1365 
1366 	network_init(&ctx);
1367 
1368 	memset(&msg[0], 0x5a, MCTP_BTU);
1369 
1370 	ctx.msg = &msg[0];
1371 	ctx.count = 0;
1372 	mctp_set_rx_all(ctx.host.mctp, astlpc_test_rx_message, &ctx);
1373 
1374 	/* BMC sends the single-packet message */
1375 	rc = mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
1376 			     sizeof(msg));
1377 	assert(rc == 0);
1378 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF);
1379 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x01);
1380 
1381 	/* Check that the BMC sent a fully-formed packet */
1382 	astlpc_assert_tx_packet(&ctx.bmc, &msg[0], MCTP_BTU);
1383 
1384 	/* Corrupt the CRC-32 in the message trailer */
1385 	hdr = (struct mctp_lpcmap_hdr *)ctx.lpc_mem;
1386 	offset = be32toh(hdr->layout.rx_offset);
1387 	tlr = (uint8_t *)&ctx.lpc_mem[offset] + 4 + sizeof(msg);
1388 	memcpy(&code, tlr, sizeof(code));
1389 	code = ~code;
1390 	memcpy(tlr, &code, sizeof(code));
1391 
1392 	/* Host drops the single-packet message */
1393 	mctp_astlpc_poll(ctx.host.astlpc);
1394 	assert(ctx.count == 0);
1395 
1396 	/* Host returns Rx area ownership to BMC */
1397 	assert(!(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_OBF));
1398 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_DATA] == 0x02);
1399 	assert(ctx.kcs[MCTP_ASTLPC_KCS_REG_STATUS] & KCS_STATUS_IBF);
1400 
1401 	/* BMC dequeues ownership hand-over */
1402 	rc = mctp_astlpc_poll(ctx.bmc.astlpc);
1403 	assert(rc == 0);
1404 
1405 	network_destroy(&ctx);
1406 }
1407 
1408 static void astlpc_test_async_exchange(void)
1409 {
1410 	struct astlpc_test ctx = { 0 };
1411 	uint8_t msg[MCTP_BTU];
1412 	struct pollfd pollfd;
1413 	uint8_t tag = 0;
1414 
1415 	network_init(&ctx);
1416 
1417 	memset(&msg[0], 0x5a, MCTP_BTU);
1418 
1419 	/* (1)
1420 	 * Fill the KCS transmit buffer by sending a message from the BMC to the host without
1421 	 * dequeuing it on the host side
1422 	 */
1423 	mctp_message_tx(ctx.bmc.mctp, 9, MCTP_MESSAGE_TO_SRC, tag, msg,
1424 			sizeof(msg));
1425 
1426 	/* (2)
1427 	 * Assert that we're still listening for in-bound messages on the BMC
1428 	 */
1429 	mctp_astlpc_init_pollfd(ctx.bmc.astlpc, &pollfd);
1430 	assert(pollfd.events & POLLIN);
1431 	assert(!(pollfd.events & POLLOUT));
1432 
1433 	/* (3)
1434 	 * Send a message from the host to the BMC and dequeue the message on the BMC, triggering a
1435 	 * buffer ownership transfer command back to the host
1436 	 */
1437 	mctp_message_tx(ctx.host.mctp, 8, MCTP_MESSAGE_TO_SRC, tag, msg,
1438 			sizeof(msg));
1439 	mctp_astlpc_poll(ctx.bmc.astlpc);
1440 
1441 	/* (4)
1442 	 * Assert that the BMC has to wait for the host to dequeue the ownership transfer command
1443 	 * from (1) before further transfers take place.
1444 	 */
1445 	mctp_astlpc_init_pollfd(ctx.bmc.astlpc, &pollfd);
1446 	assert(!(pollfd.events & POLLIN));
1447 	assert(pollfd.events & POLLOUT);
1448 
1449 	/* (5)
1450 	 * Dequeue the message from (1) on the host side, allowing transmission of the outstanding
1451 	 * ownership transfer command from (3)
1452 	 */
1453 	mctp_astlpc_poll(ctx.host.astlpc);
1454 
1455 	/* (6)
1456 	 * Emulate a POLLOUT event on the BMC side
1457 	 */
1458 	mctp_astlpc_poll(ctx.bmc.astlpc);
1459 
1460 	/* (7)
1461 	 * Assert that we're again listening for in-bound messages on the BMC.
1462 	 */
1463 	mctp_astlpc_init_pollfd(ctx.bmc.astlpc, &pollfd);
1464 	assert(pollfd.events & POLLIN);
1465 	assert(!(pollfd.events & POLLOUT));
1466 
1467 	network_destroy(&ctx);
1468 }
1469 
1470 /* clang-format off */
1471 #define TEST_CASE(test) { #test, test }
1472 static const struct {
1473 	const char *name;
1474 	void (*test)(void);
1475 } astlpc_tests[] = {
1476 	TEST_CASE(astlpc_test_simple_init),
1477 	TEST_CASE(astlpc_test_bad_version),
1478 	TEST_CASE(astlpc_test_incompatible_versions),
1479 	TEST_CASE(astlpc_test_choose_bmc_ver_cur),
1480 	TEST_CASE(astlpc_test_choose_host_ver_cur),
1481 	TEST_CASE(astlpc_test_version_host_fails_negotiation),
1482 	TEST_CASE(astlpc_test_version_bmc_fails_negotiation),
1483 	TEST_CASE(astlpc_test_host_before_bmc),
1484 	TEST_CASE(astlpc_test_simple_message_bmc_to_host),
1485 	TEST_CASE(astlpc_test_simple_message_host_to_bmc),
1486 	TEST_CASE(astlpc_test_packetised_message_bmc_to_host),
1487 	TEST_CASE(astlpc_test_simple_indirect_message_bmc_to_host),
1488 	TEST_CASE(astlpc_test_host_tx_bmc_gone),
1489 	TEST_CASE(astlpc_test_poll_not_ready),
1490 	TEST_CASE(astlpc_test_undefined_command),
1491 	TEST_CASE(astlpc_test_buffers_rx_offset_overflow),
1492 	TEST_CASE(astlpc_test_buffers_tx_offset_overflow),
1493 	TEST_CASE(astlpc_test_buffers_rx_size_overflow),
1494 	TEST_CASE(astlpc_test_buffers_tx_size_overflow),
1495 	TEST_CASE(astlpc_test_buffers_rx_window_violation),
1496 	TEST_CASE(astlpc_test_buffers_tx_window_violation),
1497 	TEST_CASE(astlpc_test_buffers_rx_size_fails_btu),
1498 	TEST_CASE(astlpc_test_buffers_tx_size_fails_btu),
1499 	TEST_CASE(astlpc_test_buffers_overlap_rx_low),
1500 	TEST_CASE(astlpc_test_buffers_overlap_tx_low),
1501 	TEST_CASE(astlpc_test_buffers_bad_host_proposal),
1502 	TEST_CASE(astlpc_test_buffers_bad_bmc_proposal),
1503 	TEST_CASE(astlpc_test_buffers_bad_bmc_negotiation),
1504 	TEST_CASE(astlpc_test_buffers_overlap_exact),
1505 	TEST_CASE(astlpc_test_buffers_overlap_control),
1506 	TEST_CASE(astlpc_test_buffers_bad_host_init),
1507 	TEST_CASE(astlpc_test_negotiate_increased_mtu),
1508 	TEST_CASE(astlpc_test_negotiate_mtu_low_high),
1509 	TEST_CASE(astlpc_test_negotiate_mtu_high_low),
1510 	TEST_CASE(astlpc_test_send_large_packet),
1511 	TEST_CASE(astlpc_test_tx_before_channel_init),
1512 	TEST_CASE(astlpc_test_corrupt_host_tx),
1513 	TEST_CASE(astlpc_test_corrupt_bmc_tx),
1514 	TEST_CASE(astlpc_test_async_exchange),
1515 };
1516 /* clang-format on */
1517 
1518 int main(void)
1519 {
1520 	size_t i;
1521 
1522 	mctp_set_log_stdio(MCTP_LOG_DEBUG);
1523 
1524 	static_assert(ARRAY_SIZE(astlpc_tests) < SIZE_MAX, "size");
1525 	for (i = 0; i < ARRAY_SIZE(astlpc_tests); i++) {
1526 		mctp_prlog(MCTP_LOG_DEBUG, "begin: %s", astlpc_tests[i].name);
1527 		astlpc_tests[i].test();
1528 		mctp_prlog(MCTP_LOG_DEBUG, "end: %s\n", astlpc_tests[i].name);
1529 	}
1530 
1531 	return 0;
1532 }
1533