xref: /openbmc/libmctp/astlpc.c (revision dca82599)
1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */
2 
3 #if HAVE_CONFIG_H
4 #include "config.h"
5 #endif
6 
7 #if HAVE_ENDIAN_H
8 #include <endian.h>
9 #endif
10 
11 #include <assert.h>
12 #include <err.h>
13 #include <errno.h>
14 #include <inttypes.h>
15 #include <stdbool.h>
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #define pr_fmt(x) "astlpc: " x
20 
21 #include "container_of.h"
22 #include "crc32.h"
23 #include "libmctp.h"
24 #include "libmctp-alloc.h"
25 #include "libmctp-log.h"
26 #include "libmctp-astlpc.h"
27 #include "range.h"
28 
29 #ifdef MCTP_HAVE_FILEIO
30 
31 #include <unistd.h>
32 #include <fcntl.h>
33 #include <poll.h>
34 #include <sys/ioctl.h>
35 #include <sys/mman.h>
36 #include <linux/aspeed-lpc-ctrl.h>
37 
38 /* kernel interface */
39 static const char *kcs_path = "/dev/mctp0";
40 static const char *lpc_path = "/dev/aspeed-lpc-ctrl";
41 
42 #endif
43 
44 enum mctp_astlpc_cmd {
45 	cmd_initialise = 0x00,
46 	cmd_tx_begin = 0x01,
47 	cmd_rx_complete = 0x02,
48 	cmd_dummy_value = 0xff,
49 };
50 
51 enum mctp_astlpc_buffer_state {
52 	/*
53 	 * Prior to "Channel Ready" we mark the buffers as "idle" to catch illegal accesses. In this
54 	 * state neither side is considered the owner of the buffer.
55 	 *
56 	 * Upon "Channel Ready", each side transitions the buffers from the initial "idle" state
57 	 * to the following target states:
58 	 *
59 	 * Tx buffer: "acquired"
60 	 * Rx buffer: "released"
61 	 */
62 	buffer_state_idle,
63 
64 	/*
65 	 * Beyond initialisation by "Channel Ready", buffers are in the "acquired" state once:
66 	 *
67 	 * 1. We dequeue from the KCS interface a control command transferring ownership of the
68 	 *    buffer to us, and
69 	 * 2. We are yet to complete all of our required accesses to the buffer
70 	 *
71 	 * * The Tx buffer enters the "acquired" state when we dequeue the "Rx Complete" command
72 	 * * The Rx buffer enters the "acquired" state when we dequeue the "Tx Begin" command
73 	 *
74 	 * It is a failure of implementation if it's possible for both sides to simultaneously
75 	 * consider a buffer as "acquired".
76 	 */
77 	buffer_state_acquired,
78 
79 	/*
80 	 * Buffers are in the "prepared" state when:
81 	 *
82 	 * 1. We have completed all of our required accesses (read or write) for the buffer, and
83 	 * 2. We have not yet successfully enqueued the control command to hand off ownership
84 	 */
85 	buffer_state_prepared,
86 
87 	/*
88 	 * Beyond initialisation by "Channel Ready", buffers are in the "released" state once:
89 	 *
90 	 * 1. We successfully enqueue the control command transferring ownership to the remote
91 	 *    side into the KCS interface
92 	 *
93 	 * * The Tx buffer enters the "released" state when we enqueue the "Tx Begin" command
94 	 * * The Rx buffer enters the "released" state when we enqueue the "Rx Complete" command
95 	 *
96 	 * It may be the case that both sides simultaneously consider a buffer to be in the
97 	 * "released" state. However, if this is true, it must also be true that a buffer ownership
98 	 * transfer command has been enqueued in the KCS interface and is yet to be dequeued.
99 	 */
100 	buffer_state_released,
101 };
102 
103 struct mctp_astlpc_buffer {
104 	uint32_t offset;
105 	uint32_t size;
106 	enum mctp_astlpc_buffer_state state;
107 };
108 
109 struct mctp_astlpc_layout {
110 	struct mctp_astlpc_buffer rx;
111 	struct mctp_astlpc_buffer tx;
112 };
113 
114 struct mctp_astlpc_protocol {
115 	uint16_t version;
116 	uint32_t (*packet_size)(uint32_t body);
117 	uint32_t (*body_size)(uint32_t packet);
118 	void (*pktbuf_protect)(struct mctp_pktbuf *pkt);
119 	bool (*pktbuf_validate)(struct mctp_pktbuf *pkt);
120 };
121 
122 struct mctp_binding_astlpc {
123 	struct mctp_binding binding;
124 
125 	void *lpc_map;
126 	struct mctp_astlpc_layout layout;
127 
128 	uint8_t mode;
129 	uint32_t requested_mtu;
130 
131 	const struct mctp_astlpc_protocol *proto;
132 
133 	/* direct ops data */
134 	struct mctp_binding_astlpc_ops ops;
135 	void *ops_data;
136 
137 	/* fileio ops data */
138 	int kcs_fd;
139 	uint8_t kcs_status;
140 };
141 
142 #define binding_to_astlpc(b)                                                   \
143 	container_of(b, struct mctp_binding_astlpc, binding)
144 
145 #define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
146 	do {                                                                   \
147 		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
148 		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
149 			   ##__VA_ARGS__);                                     \
150 	} while (0)
151 
152 #define astlpc_prerr(ctx, fmt, ...)                                            \
153 	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
154 #define astlpc_prwarn(ctx, fmt, ...)                                           \
155 	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
156 #define astlpc_prinfo(ctx, fmt, ...)                                           \
157 	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
158 #define astlpc_prdebug(ctx, fmt, ...)                                          \
159 	astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)
160 
161 /* clang-format off */
162 #define ASTLPC_MCTP_MAGIC	0x4d435450
163 #define ASTLPC_VER_BAD	0
164 #define ASTLPC_VER_MIN	1
165 
166 /* Support testing of new binding protocols */
167 #ifndef ASTLPC_VER_CUR
168 #define ASTLPC_VER_CUR	3
169 #endif
170 /* clang-format on */
171 
172 #ifndef ARRAY_SIZE
173 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
174 #endif
175 
176 static uint32_t astlpc_packet_size_v1(uint32_t body)
177 {
178 	assert((body + 4) > body);
179 
180 	return body + 4;
181 }
182 
183 static uint32_t astlpc_body_size_v1(uint32_t packet)
184 {
185 	assert((packet - 4) < packet);
186 
187 	return packet - 4;
188 }
189 
190 void astlpc_pktbuf_protect_v1(struct mctp_pktbuf *pkt)
191 {
192 	(void)pkt;
193 }
194 
195 bool astlpc_pktbuf_validate_v1(struct mctp_pktbuf *pkt)
196 {
197 	(void)pkt;
198 	return true;
199 }
200 
201 static uint32_t astlpc_packet_size_v3(uint32_t body)
202 {
203 	assert((body + 4 + 4) > body);
204 
205 	return body + 4 + 4;
206 }
207 
208 static uint32_t astlpc_body_size_v3(uint32_t packet)
209 {
210 	assert((packet - 4 - 4) < packet);
211 
212 	return packet - 4 - 4;
213 }
214 
215 void astlpc_pktbuf_protect_v3(struct mctp_pktbuf *pkt)
216 {
217 	uint32_t code;
218 
219 	code = htobe32(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt)));
220 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
221 	mctp_pktbuf_push(pkt, &code, 4);
222 }
223 
224 bool astlpc_pktbuf_validate_v3(struct mctp_pktbuf *pkt)
225 {
226 	uint32_t code;
227 	void *check;
228 
229 	code = be32toh(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt) - 4));
230 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
231 	check = mctp_pktbuf_pop(pkt, 4);
232 	return check && !memcmp(&code, check, 4);
233 }
234 
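/*
 * Table of supported binding protocol versions, indexed by the negotiated
 * version number. v2 reuses the v1 packet format (its differences lie in
 * buffer layout and MTU negotiation), while v3 appends a CRC32 over the
 * packet as a medium-specific trailer.
 */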
235 static const struct mctp_astlpc_protocol astlpc_protocol_version[] = {
236 	[0] = {
237 		.version = 0,
238 		.packet_size = NULL,
239 		.body_size = NULL,
240 		.pktbuf_protect = NULL,
241 		.pktbuf_validate = NULL,
242 	},
243 	[1] = {
244 		.version = 1,
245 		.packet_size = astlpc_packet_size_v1,
246 		.body_size = astlpc_body_size_v1,
247 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
248 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
249 	},
250 	[2] = {
251 		.version = 2,
252 		.packet_size = astlpc_packet_size_v1,
253 		.body_size = astlpc_body_size_v1,
254 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
255 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
256 	},
257 	[3] = {
258 		.version = 3,
259 		.packet_size = astlpc_packet_size_v3,
260 		.body_size = astlpc_body_size_v3,
261 		.pktbuf_protect = astlpc_pktbuf_protect_v3,
262 		.pktbuf_validate = astlpc_pktbuf_validate_v3,
263 	},
264 };
265 
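/*
 * Control region layout at the base of the shared LPC window. All fields are
 * big-endian, and the rx/tx buffer descriptors are expressed from the host's
 * point of view (the BMC flips them when reading and writing the header).
 */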
266 struct mctp_lpcmap_hdr {
267 	uint32_t magic;
268 
269 	uint16_t bmc_ver_min;
270 	uint16_t bmc_ver_cur;
271 	uint16_t host_ver_min;
272 	uint16_t host_ver_cur;
273 	uint16_t negotiated_ver;
274 	uint16_t pad0;
275 
276 	struct {
277 		uint32_t rx_offset;
278 		uint32_t rx_size;
279 		uint32_t tx_offset;
280 		uint32_t tx_size;
281 	} layout;
282 } __attribute__((packed));
283 
284 static const uint32_t control_size = 0x100;
285 
286 #define LPC_WIN_SIZE (1 * 1024 * 1024)
287 
288 #define KCS_STATUS_BMC_READY	  0x80
289 #define KCS_STATUS_CHANNEL_ACTIVE 0x40
290 #define KCS_STATUS_IBF		  0x02
291 #define KCS_STATUS_OBF		  0x01
292 
293 static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
294 					enum mctp_binding_astlpc_kcs_reg reg,
295 					uint8_t val)
296 {
297 	return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
298 }
299 
300 static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
301 				       enum mctp_binding_astlpc_kcs_reg reg,
302 				       uint8_t *val)
303 {
304 	return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
305 }
306 
307 static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
308 					const void *buf, long offset,
309 					size_t len)
310 {
311 	astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);
312 
313 	assert(offset >= 0);
314 
315 	/* Indirect access */
316 	if (astlpc->ops.lpc_write) {
317 		void *data = astlpc->ops_data;
318 
319 		return astlpc->ops.lpc_write(data, buf, offset, len);
320 	}
321 
322 	/* Direct mapping */
323 	assert(astlpc->lpc_map);
324 	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);
325 
326 	return 0;
327 }
328 
329 static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
330 				       void *buf, long offset, size_t len)
331 {
332 	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
333 		       offset);
334 
335 	assert(offset >= 0);
336 
337 	/* Indirect access */
338 	if (astlpc->ops.lpc_read) {
339 		void *data = astlpc->ops_data;
340 
341 		return astlpc->ops.lpc_read(data, buf, offset, len);
342 	}
343 
344 	/* Direct mapping */
345 	assert(astlpc->lpc_map);
346 	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);
347 
348 	return 0;
349 }
350 
351 static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
352 				      uint8_t status)
353 {
354 	uint8_t data;
355 	int rc;
356 
357 	/* Since we're setting the status register, we want the other endpoint
358 	 * to be interrupted. However, some hardware may only raise a host-side
359 	 * interrupt on an ODR event.
360 	 * So, write a dummy value of 0xff to ODR to ensure that an interrupt is
361 	 * triggered; the host treats the dummy byte as a status-only notification.
362 	 */
363 	data = cmd_dummy_value;
364 
365 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
366 	if (rc) {
367 		astlpc_prwarn(astlpc, "KCS status write failed");
368 		return -1;
369 	}
370 
371 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
372 	if (rc) {
373 		astlpc_prwarn(astlpc, "KCS dummy data write failed");
374 		return -1;
375 	}
376 
377 	return 0;
378 }
379 
380 static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
381 				   struct mctp_astlpc_layout *layout)
382 {
383 	struct mctp_lpcmap_hdr hdr;
384 	int rc;
385 
386 	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
387 	if (rc < 0)
388 		return rc;
389 
390 	/* Flip the buffers as the names are defined in terms of the host */
391 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
392 		layout->rx.offset = be32toh(hdr.layout.tx_offset);
393 		layout->rx.size = be32toh(hdr.layout.tx_size);
394 		layout->tx.offset = be32toh(hdr.layout.rx_offset);
395 		layout->tx.size = be32toh(hdr.layout.rx_size);
396 	} else {
397 		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
398 
399 		layout->rx.offset = be32toh(hdr.layout.rx_offset);
400 		layout->rx.size = be32toh(hdr.layout.rx_size);
401 		layout->tx.offset = be32toh(hdr.layout.tx_offset);
402 		layout->tx.size = be32toh(hdr.layout.tx_size);
403 	}
404 
405 	return 0;
406 }
407 
408 static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
409 				    struct mctp_astlpc_layout *layout)
410 {
411 	uint32_t rx_size_be;
412 
413 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
414 		struct mctp_lpcmap_hdr hdr;
415 
416 		/*
417 		 * Flip the buffers as the names are defined in terms of the
418 		 * host
419 		 */
420 		hdr.layout.rx_offset = htobe32(layout->tx.offset);
421 		hdr.layout.rx_size = htobe32(layout->tx.size);
422 		hdr.layout.tx_offset = htobe32(layout->rx.offset);
423 		hdr.layout.tx_size = htobe32(layout->rx.size);
424 
425 		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
426 					     offsetof(struct mctp_lpcmap_hdr,
427 						      layout),
428 					     sizeof(hdr.layout));
429 	}
430 
431 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
432 
433 	/*
434 	 * As of v2 we only need to write rx_size - the offsets are controlled
435 	 * by the BMC, as is the BMC's rx_size (host tx_size).
436 	 */
437 	rx_size_be = htobe32(layout->rx.size);
438 	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
439 				     offsetof(struct mctp_lpcmap_hdr,
440 					      layout.rx_size),
441 				     sizeof(rx_size_be));
442 }
443 
444 static bool
445 mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc *astlpc,
446 			    const struct mctp_astlpc_buffer *buf,
447 			    const char *name)
448 {
449 	/* Check for overflow */
450 	if (buf->offset + buf->size < buf->offset) {
451 		mctp_prerr(
452 			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
453 			", size: %" PRIu32,
454 			name, buf->offset, buf->size);
455 		return false;
456 	}
457 
458 	/* Check that the buffers are contained within the allocated space */
459 	if (buf->offset + buf->size > LPC_WIN_SIZE) {
460 		mctp_prerr(
461 			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
462 			", size: %" PRIu32,
463 			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
464 			buf->size);
465 		return false;
466 	}
467 
468 	/* Check that the baseline transmission unit is supported */
469 	if (buf->size <
470 	    astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU))) {
471 		mctp_prerr(
472 			"%s packet buffer too small: Require %" PRIu32
473 			" bytes to support the %u byte baseline transmission unit, found %" PRIu32,
474 			name,
475 			astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU)),
476 			MCTP_BTU, buf->size);
477 		return false;
478 	}
479 
480 	/* Check for overlap with the control space */
481 	if (buf->offset < control_size) {
482 		mctp_prerr(
483 			"%s packet buffer overlaps control region {0x%" PRIx32
484 			", %" PRIu32 "}: {0x%" PRIx32 ", %" PRIu32 "}",
485 			name, 0U, control_size, buf->offset, buf->size);
486 		return false;
487 	}
488 
489 	return true;
490 }
491 
492 static bool
493 mctp_astlpc_layout_validate(const struct mctp_binding_astlpc *astlpc,
494 			    const struct mctp_astlpc_layout *layout)
495 {
496 	const struct mctp_astlpc_buffer *rx = &layout->rx;
497 	const struct mctp_astlpc_buffer *tx = &layout->tx;
498 	bool rx_valid, tx_valid;
499 
500 	rx_valid = mctp_astlpc_buffer_validate(astlpc, rx, "Rx");
501 	tx_valid = mctp_astlpc_buffer_validate(astlpc, tx, "Tx");
502 
503 	if (!(rx_valid && tx_valid))
504 		return false;
505 
506 	/* Check that the buffers are disjoint */
507 	if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
508 	    (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
509 		mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
510 			   ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
511 			   rx->offset, rx->size, tx->offset, tx->size);
512 		return false;
513 	}
514 
515 	return true;
516 }
517 
518 static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
519 {
520 	struct mctp_lpcmap_hdr hdr = { 0 };
521 	uint8_t status;
522 	uint32_t sz;
523 
524 	/*
525 	 * The largest buffer size is half of the allocated MCTP space
526 	 * excluding the control space.
527 	 */
528 	sz = ((LPC_WIN_SIZE - control_size) / 2);
529 
530 	/*
531 	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
532 	 * Query Hop in DSP0236 v1.3.0.
533 	 */
534 	sz = MCTP_BODY_SIZE(astlpc->proto->body_size(sz));
535 	sz &= ~0xfUL;
536 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(sz));
537 
538 	if (astlpc->requested_mtu) {
539 		uint32_t rpkt, rmtu;
540 
541 		rmtu = astlpc->requested_mtu;
542 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
543 		sz = MIN(sz, rpkt);
544 	}
545 
546 	/* Flip the buffers as the names are defined in terms of the host */
547 	astlpc->layout.tx.offset = control_size;
548 	astlpc->layout.tx.size = sz;
549 	astlpc->layout.rx.offset =
550 		astlpc->layout.tx.offset + astlpc->layout.tx.size;
551 	astlpc->layout.rx.size = sz;
552 
553 	if (!mctp_astlpc_layout_validate(astlpc, &astlpc->layout)) {
554 		astlpc_prerr(astlpc, "Cannot support an MTU of %" PRIu32, sz);
555 		return -EINVAL;
556 	}
557 
558 	hdr = (struct mctp_lpcmap_hdr){
559 		.magic = htobe32(ASTLPC_MCTP_MAGIC),
560 		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
561 		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),
562 
563 		/* Flip the buffers back as we're now describing the host's
564 		 * configuration to the host */
565 		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
566 		.layout.rx_size = htobe32(astlpc->layout.tx.size),
567 		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
568 		.layout.tx_size = htobe32(astlpc->layout.rx.size),
569 	};
570 
571 	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));
572 
573 	/*
574 	 * Set status indicating that the BMC is now active. Be explicit about
575 	 * clearing OBF; we're reinitialising the binding and so any previous
576 	 * buffer state is irrelevant.
577 	 */
578 	status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF;
579 	return mctp_astlpc_kcs_set_status(astlpc, status);
580 }
581 
582 static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
583 {
584 	struct mctp_binding_astlpc *astlpc =
585 		container_of(b, struct mctp_binding_astlpc, binding);
586 
587 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_CUR];
588 
589 	return mctp_astlpc_init_bmc(astlpc);
590 }
591 
592 static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
593 					 uint16_t bmc_ver_cur,
594 					 uint16_t host_ver_min,
595 					 uint16_t host_ver_cur)
596 {
597 	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
598 		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
599 			   "], [%" PRIu16 ", %" PRIu16 "]",
600 			   bmc_ver_min, bmc_ver_cur, host_ver_min,
601 			   host_ver_cur);
602 		return false;
603 	} else if (bmc_ver_min > bmc_ver_cur) {
604 		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
605 			   "]",
606 			   bmc_ver_min, bmc_ver_cur);
607 		return false;
608 	} else if (host_ver_min > host_ver_cur) {
609 		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
610 			   "]",
611 			   host_ver_min, host_ver_cur);
612 		return false;
613 	} else if ((host_ver_cur < bmc_ver_min) ||
614 		   (host_ver_min > bmc_ver_cur)) {
615 		mctp_prerr(
616 			"Unable to satisfy version negotiation with ranges [%" PRIu16
617 			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
618 			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
619 		return false;
620 	}
621 
622 	return true;
623 }
624 
625 static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
626 {
627 	struct mctp_astlpc_layout layout;
628 	uint32_t rmtu;
629 	uint32_t sz;
630 	int rc;
631 
632 	rc = mctp_astlpc_layout_read(astlpc, &layout);
633 	if (rc < 0)
634 		return rc;
635 
636 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
637 		astlpc_prerr(
638 			astlpc,
639 			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
640 			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
641 			layout.rx.offset, layout.rx.size, layout.tx.offset,
642 			layout.tx.size);
643 		return -EINVAL;
644 	}
645 
646 	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
647 		      astlpc->requested_mtu);
648 
649 	rmtu = astlpc->requested_mtu;
650 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
651 	layout.rx.size = sz;
652 
653 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
654 		astlpc_prerr(
655 			astlpc,
656 			"Generated invalid buffer layout with size %" PRIu32
657 			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
658 			", %" PRIu32 "}",
659 			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
660 			layout.tx.size);
661 		return -EINVAL;
662 	}
663 
664 	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
665 		      astlpc->requested_mtu);
666 
667 	return mctp_astlpc_layout_write(astlpc, &layout);
668 }
669 
670 static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
671 					      uint16_t bmc_ver_cur,
672 					      uint16_t host_ver_min,
673 					      uint16_t host_ver_cur)
674 {
675 	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
676 					  host_ver_min, host_ver_cur))
677 		return ASTLPC_VER_BAD;
678 
679 	if (bmc_ver_cur < host_ver_cur)
680 		return bmc_ver_cur;
681 
682 	return host_ver_cur;
683 }
684 
685 static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
686 {
687 	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
688 	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
689 	uint16_t bmc_ver_min, bmc_ver_cur, negotiated;
690 	struct mctp_lpcmap_hdr hdr;
691 	uint8_t status;
692 	int rc;
693 
694 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
695 	if (rc) {
696 		mctp_prwarn("KCS status read failed");
697 		return rc;
698 	}
699 
700 	astlpc->kcs_status = status;
701 
702 	if (!(status & KCS_STATUS_BMC_READY))
703 		return -EHOSTDOWN;
704 
705 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
706 
707 	bmc_ver_min = be16toh(hdr.bmc_ver_min);
708 	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);
709 
710 	/* Calculate the expected value of negotiated_ver */
711 	negotiated = mctp_astlpc_negotiate_version(
712 		bmc_ver_min, bmc_ver_cur, ASTLPC_VER_MIN, ASTLPC_VER_CUR);
713 	if (!negotiated) {
714 		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
715 		return -EINVAL;
716 	}
717 
718 	/* Assign protocol ops so we can calculate the packet buffer sizes */
719 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
720 	astlpc->proto = &astlpc_protocol_version[negotiated];
721 
722 	/* Negotiate packet buffers in v2 style if the BMC supports it */
723 	if (negotiated >= 2) {
724 		rc = mctp_astlpc_negotiate_layout_host(astlpc);
725 		if (rc < 0)
726 			return rc;
727 	}
728 
729 	/* Advertise the host's supported protocol versions */
730 	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
731 			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
732 			      sizeof(ver_min_be));
733 
734 	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
735 			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
736 			      sizeof(ver_cur_be));
737 
738 	/* Send channel init command */
739 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
740 	if (rc) {
741 		astlpc_prwarn(astlpc, "KCS write failed");
742 	}
743 
744 	/*
745 	 * Configure the host so `astlpc->proto->version == 0` holds until we
746 	 * receive a subsequent status update from the BMC. Until then,
747 	 * `astlpc->proto->version == 0` indicates that we're yet to complete
748 	 * the channel initialisation handshake.
749 	 *
750 	 * When the BMC provides a status update with KCS_STATUS_CHANNEL_ACTIVE
751 	 * set we will assign the appropriate protocol ops struct in accordance
752 	 * with `negotiated_ver`.
753 	 */
754 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_BAD];
755 
756 	return rc;
757 }
758 
759 static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
760 {
761 	struct mctp_binding_astlpc *astlpc =
762 		container_of(b, struct mctp_binding_astlpc, binding);
763 
764 	return mctp_astlpc_init_host(astlpc);
765 }
766 
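/*
 * Determine whether the KCS interface is ready for the requested operation.
 * IBF is set while a host-written byte is pending for the BMC, and OBF is set
 * while a BMC-written byte is pending for the host, so the flag of interest
 * depends on both our role and the direction of the access: writes require
 * the relevant flag to be clear, reads require it to be set.
 */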
767 static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
768 				    uint8_t status, bool is_write)
769 {
770 	bool is_bmc;
771 	bool ready_state;
772 	uint8_t flag;
773 
774 	is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
775 	flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
776 	ready_state = is_write ? 0 : 1;
777 
778 	return !!(status & flag) == ready_state;
779 }
780 
781 static inline bool
782 mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
783 {
784 	return __mctp_astlpc_kcs_ready(astlpc, status, false);
785 }
786 
787 static inline bool
788 mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
789 {
790 	return __mctp_astlpc_kcs_ready(astlpc, status, true);
791 }
792 
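/*
 * Enqueue a single command byte via the KCS data register. Returns -EBUSY if
 * the interface is not ready to accept a byte; callers keep the associated
 * buffer in the "prepared" state so the command can be retried from
 * mctp_astlpc_poll().
 */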
793 static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
794 				enum mctp_astlpc_cmd data)
795 {
796 	uint8_t status;
797 	int rc;
798 
799 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
800 	if (rc) {
801 		astlpc_prwarn(astlpc, "KCS status read failed");
802 		return -EIO;
803 	}
804 	if (!mctp_astlpc_kcs_write_ready(astlpc, status))
805 		return -EBUSY;
806 
807 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
808 	if (rc) {
809 		astlpc_prwarn(astlpc, "KCS data write failed");
810 		return -EIO;
811 	}
812 
813 	return 0;
814 }
815 
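/*
 * Packet transmit path: write the packet length as a big-endian word at the
 * start of the Tx buffer, write the (optionally CRC-protected) packet body at
 * offset 4, then signal "Tx Begin" over KCS. A -EBUSY from the KCS send is
 * reported as success; the buffer remains "prepared" and the command is
 * retried from mctp_astlpc_poll().
 */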
816 static int mctp_binding_astlpc_tx(struct mctp_binding *b,
817 				  struct mctp_pktbuf *pkt)
818 {
819 	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
820 	uint32_t len, len_be;
821 	struct mctp_hdr *hdr;
822 	int rc;
823 
824 	hdr = mctp_pktbuf_hdr(pkt);
825 	len = mctp_pktbuf_size(pkt);
826 
827 	astlpc_prdebug(astlpc,
828 		       "%s: Transmitting %" PRIu32
829 		       "-byte packet (%hhu, %hhu, 0x%hhx)",
830 		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);
831 
832 	if (len > astlpc->proto->body_size(astlpc->layout.tx.size)) {
833 		astlpc_prwarn(astlpc, "invalid TX len %" PRIu32 ": %" PRIu32,
834 			      len,
835 			      astlpc->proto->body_size(astlpc->layout.tx.size));
836 		return -EMSGSIZE;
837 	}
838 
839 	mctp_binding_set_tx_enabled(b, false);
840 
841 	len_be = htobe32(len);
842 	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
843 			      sizeof(len_be));
844 
845 	astlpc->proto->pktbuf_protect(pkt);
846 	len = mctp_pktbuf_size(pkt);
847 
848 	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);
849 
850 	astlpc->layout.tx.state = buffer_state_prepared;
851 
852 	rc = mctp_astlpc_kcs_send(astlpc, cmd_tx_begin);
853 	if (!rc)
854 		astlpc->layout.tx.state = buffer_state_released;
855 
856 	return rc == -EBUSY ? 0 : rc;
857 }
858 
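/*
 * The BMC fixes the buffer offsets and lays the buffers out back to back
 * beyond the control region, so the distance between the two offsets is the
 * most the lower buffer can grow without colliding with the other; by
 * convention the resulting limit is applied symmetrically to both directions.
 */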
859 static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
860 					  struct mctp_astlpc_layout *layout)
861 {
862 	uint32_t low, high, limit, rpkt;
863 
864 	/* Derive the largest MTU the BMC _can_ support */
865 	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
866 	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
867 	limit = high - low;
868 
869 	/* Determine the largest MTU the BMC _wants_ to support */
870 	if (astlpc->requested_mtu) {
871 		uint32_t rmtu = astlpc->requested_mtu;
872 
873 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
874 		limit = MIN(limit, rpkt);
875 	}
876 
877 	/* Determine the accepted MTU, applied in both directions by convention */
878 	rpkt = MIN(limit, layout->tx.size);
879 	return MCTP_BODY_SIZE(astlpc->proto->body_size(rpkt));
880 }
881 
882 static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
883 {
884 	struct mctp_astlpc_layout proposed, pending;
885 	uint32_t sz, mtu;
886 	int rc;
887 
888 	/* Do we have a valid protocol version? */
889 	if (!astlpc->proto->version)
890 		return -EINVAL;
891 
892 	/* Extract the host's proposed layout */
893 	rc = mctp_astlpc_layout_read(astlpc, &proposed);
894 	if (rc < 0)
895 		return rc;
896 
897 	/* Do we have a reasonable layout? */
898 	if (!mctp_astlpc_layout_validate(astlpc, &proposed))
899 		return -EINVAL;
900 
901 	/* Negotiate the MTU */
902 	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
903 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(mtu));
904 
905 	/*
906 	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
907 	 * functions
908 	 */
909 	pending = astlpc->layout;
910 	pending.tx.size = sz;
911 	pending.rx.size = sz;
912 
913 	if (mctp_astlpc_layout_validate(astlpc, &pending)) {
914 		/* We found a sensible Rx MTU, so honour it */
915 		astlpc->layout = pending;
916 
917 		/* Enforce the negotiated MTU */
918 		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
919 		if (rc < 0)
920 			return rc;
921 
922 		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
923 			      mtu);
924 	} else {
925 		astlpc_prwarn(astlpc, "MTU negotiation failed");
926 		return -EINVAL;
927 	}
928 
929 	if (astlpc->proto->version >= 2)
930 		astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);
931 
932 	return 0;
933 }
934 
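/*
 * BMC-side handling of the host's "Initialise Channel" command: negotiate the
 * protocol version and buffer layout from the host's advertised values,
 * publish the outcome in negotiated_ver, reset buffer ownership, and raise
 * "Channel Active" (or report failure by leaving it clear).
 */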
935 static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
936 {
937 	uint16_t negotiated, negotiated_be;
938 	struct mctp_lpcmap_hdr hdr;
939 	uint8_t status;
940 	int rc;
941 
942 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
943 
944 	/* Version negotiation */
945 	negotiated = mctp_astlpc_negotiate_version(ASTLPC_VER_MIN,
946 						   ASTLPC_VER_CUR,
947 						   be16toh(hdr.host_ver_min),
948 						   be16toh(hdr.host_ver_cur));
949 
950 	/* MTU negotiation requires knowing which protocol we'll use */
951 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
952 	astlpc->proto = &astlpc_protocol_version[negotiated];
953 
954 	/* Host Rx MTU negotiation: Failure terminates channel init */
955 	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
956 	if (rc < 0)
957 		negotiated = ASTLPC_VER_BAD;
958 
959 	/* Populate the negotiated version */
960 	negotiated_be = htobe16(negotiated);
961 	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
962 			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
963 			      sizeof(negotiated_be));
964 
965 	/* Track buffer ownership */
966 	astlpc->layout.tx.state = buffer_state_acquired;
967 	astlpc->layout.rx.state = buffer_state_released;
968 
969 	/* Finalise the configuration */
970 	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
971 	if (negotiated > 0) {
972 		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
973 			      negotiated);
974 		status |= KCS_STATUS_CHANNEL_ACTIVE;
975 	} else {
976 		astlpc_prerr(astlpc, "Failed to initialise channel");
977 	}
978 
979 	mctp_astlpc_kcs_set_status(astlpc, status);
980 
981 	mctp_binding_set_tx_enabled(&astlpc->binding,
982 				    status & KCS_STATUS_CHANNEL_ACTIVE);
983 }
984 
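/*
 * Handle "Tx Begin" from the remote side: read the big-endian length word at
 * the head of the Rx buffer, copy out the packet (and any medium-specific
 * trailer), acknowledge with "Rx Complete" to hand the buffer back, and only
 * then validate and deliver the packet to the core.
 */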
985 static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
986 {
987 	struct mctp_pktbuf *pkt;
988 	struct mctp_hdr *hdr;
989 	uint32_t body, packet;
990 
991 	mctp_astlpc_lpc_read(astlpc, &body, astlpc->layout.rx.offset,
992 			     sizeof(body));
993 	body = be32toh(body);
994 
995 	if (body > astlpc->proto->body_size(astlpc->layout.rx.size)) {
996 		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
997 		return;
998 	}
999 
1000 	if ((size_t)body > astlpc->binding.pkt_size) {
1001 		astlpc_prwarn(astlpc, "RX len 0x%x exceeds binding pkt_size", body);
1002 		return;
1003 	}
1004 
1005 	/* Eliminate the medium-specific header that we just read */
1006 	packet = astlpc->proto->packet_size(body) - 4;
1007 	pkt = mctp_pktbuf_alloc(&astlpc->binding, packet);
1008 	if (!pkt) {
1009 		astlpc_prwarn(astlpc, "unable to allocate pktbuf len 0x%x",
1010 			      packet);
1011 		return;
1012 	}
1013 
1014 	/*
1015 	 * Read payload and medium-specific trailer from immediately after the
1016 	 * medium-specific header.
1017 	 */
1018 	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
1019 			     astlpc->layout.rx.offset + 4, packet);
1020 
1021 	astlpc->layout.rx.state = buffer_state_prepared;
1022 
1023 	/* Inform the other side of the MCTP interface that we have read
1024 	 * the packet off the bus before handling the contents of the packet.
1025 	 */
1026 	if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
1027 		astlpc->layout.rx.state = buffer_state_released;
1028 
1029 	hdr = mctp_pktbuf_hdr(pkt);
1030 	if (hdr->ver != 1) {
1031 		mctp_pktbuf_free(pkt);
1032 		astlpc_prdebug(astlpc, "Dropped packet with invalid version");
1033 		return;
1034 	}
1035 
1036 	/*
1037 	 * v3 will validate the CRC32 in the medium-specific trailer and adjust
1038 	 * the packet size accordingly. On older protocols validation is a no-op
1039 	 * that always returns true.
1040 	 */
1041 	if (astlpc->proto->pktbuf_validate(pkt)) {
1042 		mctp_bus_rx(&astlpc->binding, pkt);
1043 	} else {
1044 		/* TODO: Drop any associated assembly */
1045 		mctp_pktbuf_free(pkt);
1046 		astlpc_prdebug(astlpc, "Dropped corrupt packet");
1047 	}
1048 }
1049 
1050 static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
1051 {
1052 	astlpc->layout.tx.state = buffer_state_acquired;
1053 	mctp_binding_set_tx_enabled(&astlpc->binding, true);
1054 }
1055 
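/*
 * Host-side completion of channel initialisation: read back the version the
 * BMC selected and the final buffer layout, adopt them, and reset buffer
 * ownership ready for traffic.
 */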
1056 static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
1057 {
1058 	struct mctp_astlpc_layout layout;
1059 	uint16_t negotiated;
1060 	int rc;
1061 
1062 	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
1063 				  offsetof(struct mctp_lpcmap_hdr,
1064 					   negotiated_ver),
1065 				  sizeof(negotiated));
1066 	if (rc < 0)
1067 		return rc;
1068 
1069 	negotiated = be16toh(negotiated);
1070 	astlpc_prerr(astlpc, "Version negotiation got: %u", negotiated);
1071 
1072 	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
1073 	    negotiated > ASTLPC_VER_CUR) {
1074 		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
1075 			     negotiated);
1076 		return -EINVAL;
1077 	}
1078 
1079 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
1080 	astlpc->proto = &astlpc_protocol_version[negotiated];
1081 
1082 	rc = mctp_astlpc_layout_read(astlpc, &layout);
1083 	if (rc < 0)
1084 		return rc;
1085 
1086 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
1087 		mctp_prerr("BMC proposed invalid buffer parameters");
1088 		return -EINVAL;
1089 	}
1090 
1091 	astlpc->layout = layout;
1092 
1093 	if (negotiated >= 2)
1094 		astlpc->binding.pkt_size =
1095 			astlpc->proto->body_size(astlpc->layout.tx.size);
1096 
1097 	/* Track buffer ownership */
1098 	astlpc->layout.tx.state = buffer_state_acquired;
1099 	astlpc->layout.rx.state = buffer_state_released;
1100 
1101 	return 0;
1102 }
1103 
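/*
 * Host-side reaction to a BMC status change: a toggle of "BMC Ready" either
 * restarts the binding or shuts the channel down, while a change in "Channel
 * Active" (or an as-yet unnegotiated protocol) triggers
 * mctp_astlpc_finalise_channel() and updates whether Tx is enabled.
 */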
1104 static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
1105 				      uint8_t status)
1106 {
1107 	uint8_t updated;
1108 	int rc = 0;
1109 
1110 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
1111 
1112 	updated = astlpc->kcs_status ^ status;
1113 
1114 	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
1115 		       status, updated);
1116 
1117 	if (updated & KCS_STATUS_BMC_READY) {
1118 		if (status & KCS_STATUS_BMC_READY) {
1119 			astlpc->kcs_status = status;
1120 			return astlpc->binding.start(&astlpc->binding);
1121 		} else {
1122 			/* Shut down the channel */
1123 			astlpc->layout.rx.state = buffer_state_idle;
1124 			astlpc->layout.tx.state = buffer_state_idle;
1125 			mctp_binding_set_tx_enabled(&astlpc->binding, false);
1126 		}
1127 	}
1128 
1129 	if (astlpc->proto->version == 0 ||
1130 	    updated & KCS_STATUS_CHANNEL_ACTIVE) {
1131 		bool enable;
1132 
1133 		astlpc->layout.rx.state = buffer_state_idle;
1134 		astlpc->layout.tx.state = buffer_state_idle;
1135 		rc = mctp_astlpc_finalise_channel(astlpc);
1136 		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;
1137 		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
1138 	}
1139 
1140 	astlpc->kcs_status = status;
1141 
1142 	return rc;
1143 }
1144 
1145 bool mctp_astlpc_tx_done(struct mctp_binding_astlpc *astlpc)
1146 {
1147 	return astlpc->layout.tx.state == buffer_state_acquired;
1148 }
1149 
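/*
 * Main polling entry point: retry any pending buffer-ownership handoffs,
 * then consume at most one command byte from the KCS interface and dispatch
 * it according to the current channel state.
 */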
1150 int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
1151 {
1152 	uint8_t status, data;
1153 	int rc;
1154 
1155 	if (astlpc->layout.rx.state == buffer_state_prepared)
1156 		if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
1157 			astlpc->layout.rx.state = buffer_state_released;
1158 
1159 	if (astlpc->layout.tx.state == buffer_state_prepared)
1160 		if (!mctp_astlpc_kcs_send(astlpc, cmd_tx_begin))
1161 			astlpc->layout.tx.state = buffer_state_released;
1162 
1163 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
1164 	if (rc) {
1165 		astlpc_prwarn(astlpc, "KCS read error");
1166 		return -1;
1167 	}
1168 
1169 	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);
1170 
1171 	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
1172 		return 0;
1173 
1174 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
1175 	if (rc) {
1176 		astlpc_prwarn(astlpc, "KCS data read error");
1177 		return -1;
1178 	}
1179 
1180 	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);
1181 
1182 	if (!astlpc->proto->version &&
1183 	    !(data == cmd_initialise || data == cmd_dummy_value)) {
1184 		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
1185 			      data);
1186 		return 0;
1187 	}
1188 
1189 	switch (data) {
1190 	case cmd_initialise:
1191 		mctp_astlpc_init_channel(astlpc);
1192 		break;
1193 	case cmd_tx_begin:
1194 		if (astlpc->layout.rx.state != buffer_state_released) {
1195 			astlpc_prerr(
1196 				astlpc,
1197 				"Protocol error: Invalid Rx buffer state for event %d: %d\n",
1198 				data, astlpc->layout.rx.state);
1199 			return 0;
1200 		}
1201 		mctp_astlpc_rx_start(astlpc);
1202 		break;
1203 	case cmd_rx_complete:
1204 		if (astlpc->layout.tx.state != buffer_state_released) {
1205 			astlpc_prerr(
1206 				astlpc,
1207 				"Protocol error: Invalid Tx buffer state for event %d: %d\n",
1208 				data, astlpc->layout.tx.state);
1209 			return 0;
1210 		}
1211 		mctp_astlpc_tx_complete(astlpc);
1212 		break;
1213 	case cmd_dummy_value:
1214 		/* No responsibilities for the BMC on 0xff */
1215 		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1216 			rc = mctp_astlpc_update_channel(astlpc, status);
1217 			if (rc < 0)
1218 				return rc;
1219 		}
1220 		break;
1221 	default:
1222 		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
1223 	}
1224 
1225 	/* Handle silent loss of bmc-ready */
1226 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1227 		if (!(status & KCS_STATUS_BMC_READY && data == cmd_dummy_value))
1228 			return mctp_astlpc_update_channel(astlpc, status);
1229 	}
1230 
1231 	return rc;
1232 }
1233 
1234 /* allocate and basic initialisation */
1235 static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
1236 						      uint32_t mtu)
1237 {
1238 	struct mctp_binding_astlpc *astlpc;
1239 
1240 	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
1241 	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));
1242 
1243 	astlpc = __mctp_alloc(sizeof(*astlpc));
1244 	if (!astlpc)
1245 		return NULL;
1246 
1247 	memset(astlpc, 0, sizeof(*astlpc));
1248 	astlpc->mode = mode;
1249 	astlpc->lpc_map = NULL;
1250 	astlpc->layout.rx.state = buffer_state_idle;
1251 	astlpc->layout.tx.state = buffer_state_idle;
1252 	astlpc->requested_mtu = mtu;
1253 	astlpc->binding.name = "astlpc";
1254 	astlpc->binding.version = 1;
1255 	astlpc->binding.pkt_size =
1256 		MCTP_PACKET_SIZE(mtu > MCTP_BTU ? mtu : MCTP_BTU);
1257 	astlpc->binding.pkt_header = 4;
1258 	astlpc->binding.pkt_trailer = 4;
1259 	astlpc->binding.tx = mctp_binding_astlpc_tx;
1260 	if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1261 		astlpc->binding.start = mctp_binding_astlpc_start_bmc;
1262 	else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
1263 		astlpc->binding.start = mctp_binding_astlpc_start_host;
1264 	else {
1265 		astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode);
1266 		__mctp_free(astlpc);
1267 		return NULL;
1268 	}
1269 
1270 	return astlpc;
1271 }
1272 
1273 struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
1274 {
1275 	return &b->binding;
1276 }
1277 
1278 struct mctp_binding_astlpc *
1279 mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
1280 		 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
1281 {
1282 	struct mctp_binding_astlpc *astlpc;
1283 
1284 	if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
1285 	      mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
1286 		mctp_prerr("Unknown binding mode: %u", mode);
1287 		return NULL;
1288 	}
1289 
1290 	astlpc = __mctp_astlpc_init(mode, mtu);
1291 	if (!astlpc)
1292 		return NULL;
1293 
1294 	memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
1295 	astlpc->ops_data = ops_data;
1296 	astlpc->lpc_map = lpc_map;
1297 	astlpc->mode = mode;
1298 
1299 	return astlpc;
1300 }
1301 
1302 struct mctp_binding_astlpc *
1303 mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
1304 		     void *lpc_map)
1305 {
1306 	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
1307 				ops, ops_data);
1308 }
1309 
1310 void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
1311 {
1312 	/* Clear channel-active and bmc-ready */
1313 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1314 		mctp_astlpc_kcs_set_status(astlpc, 0);
1315 	__mctp_free(astlpc);
1316 }
1317 
1318 #ifdef MCTP_HAVE_FILEIO
1319 
1320 static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
1321 {
1322 	struct aspeed_lpc_ctrl_mapping map = {
1323 		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
1324 		.window_id = 0, /* There's only one */
1325 		.flags = 0,
1326 		.addr = 0,
1327 		.offset = 0,
1328 		.size = 0
1329 	};
1330 	void *lpc_map_base;
1331 	int fd, rc;
1332 
1333 	fd = open(lpc_path, O_RDWR | O_SYNC);
1334 	if (fd < 0) {
1335 		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
1336 		return -1;
1337 	}
1338 
1339 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
1340 	if (rc) {
1341 		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
1342 		close(fd);
1343 		return -1;
1344 	}
1345 
1346 	/*
1347 	 *
1348 	 *
1349 	 * Decouple ourselves from hiomapd[1] (another user of the FW2AHB) by
1350 	 * mapping the FW2AHB to the reserved memory here as well.
1351 	 *
1352 	 * It's not possible to use the MCTP ASTLPC binding on machines that
1353 	 * need the FW2AHB bridge mapped anywhere except to the reserved memory
1354 	 * (e.g. the host SPI NOR).
1355 	 *
1356 	 * [1] https://github.com/openbmc/hiomapd/
1357 	 *
1358 	 *
1359 	 *
1360 	 * The following calculation must align with what's going on in
1361 	 * hiomapd's lpc.c so as not to disrupt its behaviour:
1362 	 *
1363 	 * https://github.com/openbmc/hiomapd/blob/5ff50e3cbd7702aefc185264e4adfb9952040575/lpc.c#L68
1364 	 *
1365 	 *
1366 	 */
1367 
1368 	/* Map the reserved memory at the top of the 28-bit LPC firmware address space */
1369 	map.addr = 0x0FFFFFFF & -map.size;
1370 	astlpc_prinfo(
1371 		astlpc,
1372 		"Configuring FW2AHB to map reserved memory at 0x%08x for 0x%x in the LPC FW cycle address-space",
1373 		map.addr, map.size);
1374 
1375 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
1376 	if (rc) {
1377 		astlpc_prwarn(astlpc,
1378 			      "Failed to map FW2AHB to reserved memory");
1379 		close(fd);
1380 		return -1;
1381 	}
1382 
1383 	/* Map the reserved memory into our address space */
1384 	lpc_map_base =
1385 		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1386 	if (lpc_map_base == MAP_FAILED) {
1387 		astlpc_prwarn(astlpc, "LPC mmap failed");
1388 		rc = -1;
1389 	} else {
1390 		astlpc->lpc_map = lpc_map_base + map.size - LPC_WIN_SIZE;
1391 	}
1392 
1393 	close(fd);
1394 
1395 	return rc;
1396 }
1397 
1398 static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc)
1399 {
1400 	astlpc->kcs_fd = open(kcs_path, O_RDWR);
1401 	if (astlpc->kcs_fd < 0)
1402 		return -1;
1403 
1404 	return 0;
1405 }
1406 
1407 static int __mctp_astlpc_fileio_kcs_read(void *arg,
1408 					 enum mctp_binding_astlpc_kcs_reg reg,
1409 					 uint8_t *val)
1410 {
1411 	struct mctp_binding_astlpc *astlpc = arg;
1412 	off_t offset = reg;
1413 	int rc;
1414 
1415 	rc = pread(astlpc->kcs_fd, val, 1, offset);
1416 
1417 	return rc == 1 ? 0 : -1;
1418 }
1419 
1420 static int __mctp_astlpc_fileio_kcs_write(void *arg,
1421 					  enum mctp_binding_astlpc_kcs_reg reg,
1422 					  uint8_t val)
1423 {
1424 	struct mctp_binding_astlpc *astlpc = arg;
1425 	off_t offset = reg;
1426 	int rc;
1427 
1428 	rc = pwrite(astlpc->kcs_fd, &val, 1, offset);
1429 
1430 	return rc == 1 ? 0 : -1;
1431 }
1432 
1433 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc,
1434 			    struct pollfd *pollfd)
1435 {
1436 	bool release;
1437 
1438 	pollfd->fd = astlpc->kcs_fd;
1439 	pollfd->events = 0;
1440 
1441 	release = astlpc->layout.rx.state == buffer_state_prepared ||
1442 		  astlpc->layout.tx.state == buffer_state_prepared;
1443 
1444 	pollfd->events = release ? POLLOUT : POLLIN;
1445 
1446 	return 0;
1447 }
1448 
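/*
 * A minimal BMC-side usage sketch for the fileio binding. This is
 * illustrative only: it assumes the generic libmctp core API from libmctp.h
 * (mctp_init(), mctp_set_rx_all(), mctp_register_bus()), a hypothetical
 * rx_handler callback and an example EID of 8, and it elides all error
 * handling.
 *
 *	struct mctp_binding_astlpc *astlpc = mctp_astlpc_init_fileio();
 *	struct mctp *mctp = mctp_init();
 *	struct pollfd pollfd;
 *
 *	mctp_set_rx_all(mctp, rx_handler, NULL);
 *	mctp_register_bus(mctp, mctp_binding_astlpc_core(astlpc), 8);
 *
 *	for (;;) {
 *		mctp_astlpc_init_pollfd(astlpc, &pollfd);
 *		poll(&pollfd, 1, -1);
 *		if (pollfd.revents)
 *			mctp_astlpc_poll(astlpc);
 *	}
 */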
1449 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
1450 {
1451 	struct mctp_binding_astlpc *astlpc;
1452 	int rc;
1453 
1454 	/*
1455 	 * If we're doing file IO then we're very likely not running
1456 	 * freestanding, so let's assume that we're on the BMC side.
1457 	 *
1458 	 * Requesting an MTU of 0 requests the largest possible MTU, whatever
1459 	 * value that might take.
1460 	 */
1461 	astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0);
1462 	if (!astlpc)
1463 		return NULL;
1464 
1465 	/* Set internal operations for KCS; the LPC window is accessed directly
1466 	 * through the mapped region, so no lpc_read/lpc_write ops are needed. */
1467 	astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
1468 	astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
1469 	astlpc->ops_data = astlpc;
1470 
1471 	rc = mctp_astlpc_init_fileio_lpc(astlpc);
1472 	if (rc) {
1473 		__mctp_free(astlpc);
1474 		return NULL;
1475 	}
1476 
1477 	rc = mctp_astlpc_init_fileio_kcs(astlpc);
1478 	if (rc) {
1479 		__mctp_free(astlpc);
1480 		return NULL;
1481 	}
1482 
1483 	return astlpc;
1484 }
1485 #else
1486 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
1487 {
1488 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1489 	return NULL;
1490 }
1491 
1492 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc __unused,
1493 			    struct pollfd *pollfd __unused)
1494 {
1495 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1496 	return -1;
1497 }
1498 #endif
1499