xref: /openbmc/libmctp/astlpc.c (revision a68185c4)
1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */
2 
3 #if HAVE_CONFIG_H
4 #include "config.h"
5 #endif
6 
7 #if HAVE_ENDIAN_H
8 #include <endian.h>
9 #endif
10 
11 #include <assert.h>
12 #include <err.h>
13 #include <errno.h>
14 #include <inttypes.h>
15 #include <stdbool.h>
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #define pr_fmt(x) "astlpc: " x
20 
21 #include "container_of.h"
22 #include "crc32.h"
23 #include "libmctp.h"
24 #include "libmctp-alloc.h"
25 #include "libmctp-log.h"
26 #include "libmctp-astlpc.h"
27 #include "range.h"
28 
29 #ifdef MCTP_HAVE_FILEIO
30 
31 #include <unistd.h>
32 #include <fcntl.h>
33 #include <poll.h>
34 #include <sys/ioctl.h>
35 #include <sys/mman.h>
36 #include <linux/aspeed-lpc-ctrl.h>
37 
38 /* kernel interface */
39 static const char *kcs_path = "/dev/mctp0";
40 static const char *lpc_path = "/dev/aspeed-lpc-ctrl";
41 
42 #endif
43 
44 enum mctp_astlpc_buffer_state {
45 	/*
46 	 * Prior to "Channel Ready" we mark the buffers as "idle" to catch illegal accesses. In this
47 	 * state neither side is considered the owner of the buffer.
48 	 *
49 	 * Upon "Channel Ready", each side transitions the buffers from the initial "idle" state
50 	 * to the following target states:
51 	 *
52 	 * Tx buffer: "acquired"
53 	 * Rx buffer: "released"
54 	 */
55 	buffer_state_idle,
56 
57 	/*
58 	 * Beyond initialisation by "Channel Ready", buffers are in the "acquired" state once:
59 	 *
60 	 * 1. We dequeue a control command transferring the buffer to our ownership out of the KCS
61 	 *    interface, and
62 	 * 2. We are yet to complete all of our required accesses to the buffer
63 	 *
64 	 * * The Tx buffer enters the "acquired" state when we dequeue the "Rx Complete" command
65 	 * * The Rx buffer enters the "acquired" state when we dequeue the "Tx Begin" command
66 	 *
67 	 * It is a failure of implementation if it's possible for both sides to simultaneously
68 	 * consider a buffer as "acquired".
69 	 */
70 	buffer_state_acquired,
71 
72 	/*
73 	 * Buffers are in the "prepared" state when:
74 	 *
75 	 * 1. We have completed all of our required accesses (read or write) for the buffer, and
76 	 * 2. We have not yet successfully enqueued the control command to hand off ownership
77 	 */
78 	buffer_state_prepared,
79 
80 	/*
81 	 * Beyond initialisation by "Channel Ready", buffers are in the "released" state once:
82 	 *
83 	 * 1. We successfully enqueue the control command transferring ownership to the remote
84 	 *    side into the KCS interface
85 	 *
86 	 * * The Tx buffer enters the "released" state when we enqueue the "Tx Begin" command
87 	 * * The Rx buffer enters the "released" state when we enqueue the "Rx Complete" command
88 	 *
89 	 * It may be the case that both sides simultaneously consider a buffer to be in the
90 	 * "released" state. However, if this is true, it must also be true that a buffer ownership
91 	 * transfer command has been enqueued in the KCS interface and is yet to be dequeued.
92 	 */
93 	buffer_state_released,
94 };
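
/*
 * A rough summary of the lifecycle described above, from this side's
 * perspective (the remote side sees the mirror image):
 *
 *   Tx: idle --"Channel Ready"--> acquired --write packet--> prepared
 *         --enqueue "Tx Begin"--> released --dequeue "Rx Complete"--> acquired
 *
 *   Rx: idle --"Channel Ready"--> released --dequeue "Tx Begin"--> acquired
 *         --read packet--> prepared --enqueue "Rx Complete"--> released
 */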
95 
96 struct mctp_astlpc_buffer {
97 	uint32_t offset;
98 	uint32_t size;
99 	enum mctp_astlpc_buffer_state state;
100 };
101 
102 struct mctp_astlpc_layout {
103 	struct mctp_astlpc_buffer rx;
104 	struct mctp_astlpc_buffer tx;
105 };
106 
107 struct mctp_astlpc_protocol {
108 	uint16_t version;
109 	uint32_t (*packet_size)(uint32_t body);
110 	uint32_t (*body_size)(uint32_t packet);
111 	void (*pktbuf_protect)(struct mctp_pktbuf *pkt);
112 	bool (*pktbuf_validate)(struct mctp_pktbuf *pkt);
113 };
114 
115 struct mctp_binding_astlpc {
116 	struct mctp_binding binding;
117 
118 	void *lpc_map;
119 	struct mctp_astlpc_layout layout;
120 
121 	uint8_t mode;
122 	uint32_t requested_mtu;
123 
124 	const struct mctp_astlpc_protocol *proto;
125 
126 	/* direct ops data */
127 	struct mctp_binding_astlpc_ops ops;
128 	void *ops_data;
129 
130 	/* fileio ops data */
131 	int kcs_fd;
132 	uint8_t kcs_status;
133 };
134 
135 #define binding_to_astlpc(b)                                                   \
136 	container_of(b, struct mctp_binding_astlpc, binding)
137 
138 #define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
139 	do {                                                                   \
140 		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
141 		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
142 			   ##__VA_ARGS__);                                     \
143 	} while (0)
144 
145 #define astlpc_prerr(ctx, fmt, ...)                                            \
146 	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
147 #define astlpc_prwarn(ctx, fmt, ...)                                           \
148 	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
149 #define astlpc_prinfo(ctx, fmt, ...)                                           \
150 	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
151 #define astlpc_prdebug(ctx, fmt, ...)                                          \
152 	astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)
153 
154 /* clang-format off */
155 #define ASTLPC_MCTP_MAGIC	0x4d435450
156 #define ASTLPC_VER_BAD	0
157 #define ASTLPC_VER_MIN	1
158 
159 /* Support testing of new binding protocols */
160 #ifndef ASTLPC_VER_CUR
161 #define ASTLPC_VER_CUR	3
162 #endif
163 /* clang-format on */
164 
165 #ifndef ARRAY_SIZE
166 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
167 #endif
168 
169 static uint32_t astlpc_packet_size_v1(uint32_t body)
170 {
171 	assert((body + 4) > body);
172 
173 	return body + 4;
174 }
175 
176 static uint32_t astlpc_body_size_v1(uint32_t packet)
177 {
178 	assert((packet - 4) < packet);
179 
180 	return packet - 4;
181 }
182 
183 static void astlpc_pktbuf_protect_v1(struct mctp_pktbuf *pkt)
184 {
185 	(void)pkt;
186 }
187 
188 static bool astlpc_pktbuf_validate_v1(struct mctp_pktbuf *pkt)
189 {
190 	(void)pkt;
191 	return true;
192 }
193 
194 static uint32_t astlpc_packet_size_v3(uint32_t body)
195 {
196 	assert((body + 4 + 4) > body);
197 
198 	return body + 4 + 4;
199 }
200 
201 static uint32_t astlpc_body_size_v3(uint32_t packet)
202 {
203 	assert((packet - 4 - 4) < packet);
204 
205 	return packet - 4 - 4;
206 }
207 
208 static void astlpc_pktbuf_protect_v3(struct mctp_pktbuf *pkt)
209 {
210 	uint32_t code;
211 
212 	code = htobe32(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt)));
213 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
214 	mctp_pktbuf_push(pkt, &code, 4);
215 }
216 
217 static bool astlpc_pktbuf_validate_v3(struct mctp_pktbuf *pkt)
218 {
219 	uint32_t code;
220 	void *check;
221 
222 	code = be32toh(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt) - 4));
223 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
224 	check = mctp_pktbuf_pop(pkt, 4);
225 	return check && !memcmp(&code, check, 4);
226 }
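
/*
 * For reference, the shared-window framing implied by the helpers above
 * (offsets are relative to the start of a packet buffer; all fields are
 * big-endian):
 *
 *   +0x00: uint32_t length of the MCTP packet (header + payload), excluding
 *          this field and any trailer
 *   +0x04: the MCTP packet itself
 *   v3 only, immediately after the packet: uint32_t CRC32 computed over the
 *   MCTP packet by astlpc_pktbuf_protect_v3() and checked by
 *   astlpc_pktbuf_validate_v3()
 *
 * Hence packet_size() adds 4 bytes for the length field (v1/v2) plus a
 * further 4 bytes for the CRC trailer (v3), and body_size() is its inverse.
 */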
227 
228 static const struct mctp_astlpc_protocol astlpc_protocol_version[] = {
229 	[0] = {
230 		.version = 0,
231 		.packet_size = NULL,
232 		.body_size = NULL,
233 		.pktbuf_protect = NULL,
234 		.pktbuf_validate = NULL,
235 	},
236 	[1] = {
237 		.version = 1,
238 		.packet_size = astlpc_packet_size_v1,
239 		.body_size = astlpc_body_size_v1,
240 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
241 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
242 	},
243 	[2] = {
244 		.version = 2,
245 		.packet_size = astlpc_packet_size_v1,
246 		.body_size = astlpc_body_size_v1,
247 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
248 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
249 	},
250 	[3] = {
251 		.version = 3,
252 		.packet_size = astlpc_packet_size_v3,
253 		.body_size = astlpc_body_size_v3,
254 		.pktbuf_protect = astlpc_pktbuf_protect_v3,
255 		.pktbuf_validate = astlpc_pktbuf_validate_v3,
256 	},
257 };
258 
259 struct mctp_lpcmap_hdr {
260 	uint32_t magic;
261 
262 	uint16_t bmc_ver_min;
263 	uint16_t bmc_ver_cur;
264 	uint16_t host_ver_min;
265 	uint16_t host_ver_cur;
266 	uint16_t negotiated_ver;
267 	uint16_t pad0;
268 
269 	struct {
270 		uint32_t rx_offset;
271 		uint32_t rx_size;
272 		uint32_t tx_offset;
273 		uint32_t tx_size;
274 	} layout;
275 } __attribute__((packed));
276 
277 static const uint32_t control_size = 0x100;
278 
279 #define LPC_WIN_SIZE (1 * 1024 * 1024)
280 
281 #define KCS_STATUS_BMC_READY	  0x80
282 #define KCS_STATUS_CHANNEL_ACTIVE 0x40
283 #define KCS_STATUS_IBF		  0x02
284 #define KCS_STATUS_OBF		  0x01
285 
286 static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
287 					enum mctp_binding_astlpc_kcs_reg reg,
288 					uint8_t val)
289 {
290 	return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
291 }
292 
293 static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
294 				       enum mctp_binding_astlpc_kcs_reg reg,
295 				       uint8_t *val)
296 {
297 	return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
298 }
299 
300 static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
301 					const void *buf, long offset,
302 					size_t len)
303 {
304 	astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);
305 
306 	assert(offset >= 0);
307 
308 	/* Indirect access */
309 	if (astlpc->ops.lpc_write) {
310 		void *data = astlpc->ops_data;
311 
312 		return astlpc->ops.lpc_write(data, buf, offset, len);
313 	}
314 
315 	/* Direct mapping */
316 	assert(astlpc->lpc_map);
317 	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);
318 
319 	return 0;
320 }
321 
322 static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
323 				       void *buf, long offset, size_t len)
324 {
325 	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
326 		       offset);
327 
328 	assert(offset >= 0);
329 
330 	/* Indirect access */
331 	if (astlpc->ops.lpc_read) {
332 		void *data = astlpc->ops_data;
333 
334 		return astlpc->ops.lpc_read(data, buf, offset, len);
335 	}
336 
337 	/* Direct mapping */
338 	assert(astlpc->lpc_map);
339 	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);
340 
341 	return 0;
342 }
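
/*
 * An illustrative (non-production) sketch of indirect LPC ops backed by a
 * plain memory buffer, in the style of the unit tests. The names here are
 * hypothetical; the required signatures follow the calls above and the ops
 * struct declared in libmctp-astlpc.h:
 *
 *   static uint8_t example_window[LPC_WIN_SIZE];
 *
 *   static int example_lpc_read(void *data, void *buf, long offset,
 *                               size_t len)
 *   {
 *           (void)data;
 *           memcpy(buf, &example_window[offset], len);
 *           return 0;
 *   }
 *
 *   static int example_lpc_write(void *data, const void *buf, long offset,
 *                                size_t len)
 *   {
 *           (void)data;
 *           memcpy(&example_window[offset], buf, len);
 *           return 0;
 *   }
 *
 * Passing such ops via mctp_astlpc_init() makes the binding take the
 * indirect paths above instead of the direct lpc_map accesses.
 */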
343 
344 static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
345 				      uint8_t status)
346 {
347 	uint8_t data;
348 	int rc;
349 
350 	/* Since we're setting the status register, we want the other endpoint
351 	 * to be interrupted. However, some hardware may only raise a host-side
352 	 * interrupt on an ODR event.
353 	 * So, write a dummy value of 0xff to ODR, which ensures that an
354 	 * interrupt is triggered and which the host can safely ignore.
355 	 */
356 	data = 0xff;
357 
358 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
359 	if (rc) {
360 		astlpc_prwarn(astlpc, "KCS status write failed");
361 		return -1;
362 	}
363 
364 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
365 	if (rc) {
366 		astlpc_prwarn(astlpc, "KCS dummy data write failed");
367 		return -1;
368 	}
369 
370 	return 0;
371 }
372 
373 static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
374 				   struct mctp_astlpc_layout *layout)
375 {
376 	struct mctp_lpcmap_hdr hdr;
377 	int rc;
378 
379 	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
380 	if (rc < 0)
381 		return rc;
382 
383 	/* Flip the buffers as the names are defined in terms of the host */
384 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
385 		layout->rx.offset = be32toh(hdr.layout.tx_offset);
386 		layout->rx.size = be32toh(hdr.layout.tx_size);
387 		layout->tx.offset = be32toh(hdr.layout.rx_offset);
388 		layout->tx.size = be32toh(hdr.layout.rx_size);
389 	} else {
390 		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
391 
392 		layout->rx.offset = be32toh(hdr.layout.rx_offset);
393 		layout->rx.size = be32toh(hdr.layout.rx_size);
394 		layout->tx.offset = be32toh(hdr.layout.tx_offset);
395 		layout->tx.size = be32toh(hdr.layout.tx_size);
396 	}
397 
398 	return 0;
399 }
400 
401 static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
402 				    struct mctp_astlpc_layout *layout)
403 {
404 	uint32_t rx_size_be;
405 
406 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
407 		struct mctp_lpcmap_hdr hdr;
408 
409 		/*
410 		 * Flip the buffers as the names are defined in terms of the
411 		 * host
412 		 */
413 		hdr.layout.rx_offset = htobe32(layout->tx.offset);
414 		hdr.layout.rx_size = htobe32(layout->tx.size);
415 		hdr.layout.tx_offset = htobe32(layout->rx.offset);
416 		hdr.layout.tx_size = htobe32(layout->rx.size);
417 
418 		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
419 					     offsetof(struct mctp_lpcmap_hdr,
420 						      layout),
421 					     sizeof(hdr.layout));
422 	}
423 
424 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
425 
426 	/*
427 	 * As of v2 we only need to write rx_size - the offsets are controlled
428 	 * by the BMC, as is the BMC's rx_size (host tx_size).
429 	 */
430 	rx_size_be = htobe32(layout->rx.size);
431 	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
432 				     offsetof(struct mctp_lpcmap_hdr,
433 					      layout.rx_size),
434 				     sizeof(rx_size_be));
435 }
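
/*
 * Note that the layout structure in the shared window always describes the
 * buffers from the host's point of view. For example, when the BMC publishes
 * its own Rx buffer it writes the parameters to hdr.layout.tx_*, as that
 * region is the host's Tx buffer. The read/write helpers above perform this
 * flip so the rest of the binding can reason purely in local terms.
 */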
436 
437 static bool
438 mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc *astlpc,
439 			    const struct mctp_astlpc_buffer *buf,
440 			    const char *name)
441 {
442 	/* Check for overflow */
443 	if (buf->offset + buf->size < buf->offset) {
444 		mctp_prerr(
445 			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
446 			", size: %" PRIu32,
447 			name, buf->offset, buf->size);
448 		return false;
449 	}
450 
451 	/* Check that the buffers are contained within the allocated space */
452 	if (buf->offset + buf->size > LPC_WIN_SIZE) {
453 		mctp_prerr(
454 			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
455 			", size: %" PRIu32,
456 			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
457 			buf->size);
458 		return false;
459 	}
460 
461 	/* Check that the baseline transmission unit is supported */
462 	if (buf->size <
463 	    astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU))) {
464 		mctp_prerr(
465 			"%s packet buffer too small: Require %" PRIu32
466 			" bytes to support the %u byte baseline transmission unit, found %" PRIu32,
467 			name,
468 			astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU)),
469 			MCTP_BTU, buf->size);
470 		return false;
471 	}
472 
473 	/* Check for overlap with the control space */
474 	if (buf->offset < control_size) {
475 		mctp_prerr(
476 			"%s packet buffer overlaps control region {0x%" PRIx32
477 			", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
478 			name, 0U, control_size, buf->offset, buf->size);
479 		return false;
480 	}
481 
482 	return true;
483 }
484 
485 static bool
486 mctp_astlpc_layout_validate(const struct mctp_binding_astlpc *astlpc,
487 			    const struct mctp_astlpc_layout *layout)
488 {
489 	const struct mctp_astlpc_buffer *rx = &layout->rx;
490 	const struct mctp_astlpc_buffer *tx = &layout->tx;
491 	bool rx_valid, tx_valid;
492 
493 	rx_valid = mctp_astlpc_buffer_validate(astlpc, rx, "Rx");
494 	tx_valid = mctp_astlpc_buffer_validate(astlpc, tx, "Tx");
495 
496 	if (!(rx_valid && tx_valid))
497 		return false;
498 
499 	/* Check that the buffers are disjoint */
500 	if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
501 	    (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
502 		mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
503 			   ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
504 			   rx->offset, rx->size, tx->offset, tx->size);
505 		return false;
506 	}
507 
508 	return true;
509 }
510 
511 static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
512 {
513 	struct mctp_lpcmap_hdr hdr = { 0 };
514 	uint8_t status;
515 	uint32_t sz;
516 
517 	/*
518 	 * The largest buffer size is half of the allocated MCTP space
519 	 * excluding the control space.
520 	 */
521 	sz = ((LPC_WIN_SIZE - control_size) / 2);
522 
523 	/*
524 	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
525 	 * Query Hop in DSP0236 v1.3.0.
526 	 */
527 	sz = MCTP_BODY_SIZE(astlpc->proto->body_size(sz));
528 	sz &= ~0xfUL;
529 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(sz));
530 
531 	if (astlpc->requested_mtu) {
532 		uint32_t rpkt, rmtu;
533 
534 		rmtu = astlpc->requested_mtu;
535 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
536 		sz = MIN(sz, rpkt);
537 	}
538 
539 	/* Flip the buffers as the names are defined in terms of the host */
540 	astlpc->layout.tx.offset = control_size;
541 	astlpc->layout.tx.size = sz;
542 	astlpc->layout.rx.offset =
543 		astlpc->layout.tx.offset + astlpc->layout.tx.size;
544 	astlpc->layout.rx.size = sz;
545 
546 	if (!mctp_astlpc_layout_validate(astlpc, &astlpc->layout)) {
547 		astlpc_prerr(astlpc, "Cannot support an MTU of %" PRIu32, sz);
548 		return -EINVAL;
549 	}
550 
551 	hdr = (struct mctp_lpcmap_hdr){
552 		.magic = htobe32(ASTLPC_MCTP_MAGIC),
553 		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
554 		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),
555 
556 		/* Flip the buffers back as we're now describing the host's
557 		 * configuration to the host */
558 		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
559 		.layout.rx_size = htobe32(astlpc->layout.tx.size),
560 		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
561 		.layout.tx_size = htobe32(astlpc->layout.rx.size),
562 	};
563 
564 	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));
565 
566 	/*
567 	 * Set status indicating that the BMC is now active. Be explicit about
568 	 * clearing OBF; we're reinitialising the binding and so any previous
569 	 * buffer state is irrelevant.
570 	 */
571 	status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF;
572 	return mctp_astlpc_kcs_set_status(astlpc, status);
573 }
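
/*
 * As a worked example (assuming the defaults above and protocol v3, the
 * default ASTLPC_VER_CUR): with a 1MiB window and a 0x100-byte control
 * region, sz starts at (0x100000 - 0x100) / 2 = 0x7ff80. Converting to an
 * MTU gives (0x7ff80 - 8) - 4 = 0x7ff74, which is trimmed to 0x7ff70, and
 * converting back to a packet buffer gives (0x7ff70 + 4) + 8 = 0x7ff7c. The
 * Tx buffer then sits at 0x100 and the Rx buffer at 0x100 + 0x7ff7c, each
 * 0x7ff7c bytes, unless a smaller requested_mtu further constrains sz.
 */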
574 
575 static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
576 {
577 	struct mctp_binding_astlpc *astlpc =
578 		container_of(b, struct mctp_binding_astlpc, binding);
579 
580 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_CUR];
581 
582 	return mctp_astlpc_init_bmc(astlpc);
583 }
584 
585 static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
586 					 uint16_t bmc_ver_cur,
587 					 uint16_t host_ver_min,
588 					 uint16_t host_ver_cur)
589 {
590 	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
591 		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
592 			   "], [%" PRIu16 ", %" PRIu16 "]",
593 			   bmc_ver_min, bmc_ver_cur, host_ver_min,
594 			   host_ver_cur);
595 		return false;
596 	} else if (bmc_ver_min > bmc_ver_cur) {
597 		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
598 			   "]",
599 			   bmc_ver_min, bmc_ver_cur);
600 		return false;
601 	} else if (host_ver_min > host_ver_cur) {
602 		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
603 			   "]",
604 			   host_ver_min, host_ver_cur);
605 		return false;
606 	} else if ((host_ver_cur < bmc_ver_min) ||
607 		   (host_ver_min > bmc_ver_cur)) {
608 		mctp_prerr(
609 			"Unable to satisfy version negotiation with ranges [%" PRIu16
610 			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
611 			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
612 		return false;
613 	}
614 
615 	return true;
616 }
617 
618 static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
619 {
620 	struct mctp_astlpc_layout layout;
621 	uint32_t rmtu;
622 	uint32_t sz;
623 	int rc;
624 
625 	rc = mctp_astlpc_layout_read(astlpc, &layout);
626 	if (rc < 0)
627 		return rc;
628 
629 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
630 		astlpc_prerr(
631 			astlpc,
632 			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
633 			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
634 			layout.rx.offset, layout.rx.size, layout.tx.offset,
635 			layout.tx.size);
636 		return -EINVAL;
637 	}
638 
639 	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
640 		      astlpc->requested_mtu);
641 
642 	rmtu = astlpc->requested_mtu;
643 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
644 	layout.rx.size = sz;
645 
646 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
647 		astlpc_prerr(
648 			astlpc,
649 			"Generated invalid buffer layout with size %" PRIu32
650 			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
651 			", %" PRIu32 "}",
652 			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
653 			layout.tx.size);
654 		return -EINVAL;
655 	}
656 
657 	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
658 		      astlpc->requested_mtu);
659 
660 	return mctp_astlpc_layout_write(astlpc, &layout);
661 }
662 
663 static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
664 					      uint16_t bmc_ver_cur,
665 					      uint16_t host_ver_min,
666 					      uint16_t host_ver_cur)
667 {
668 	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
669 					  host_ver_min, host_ver_cur))
670 		return ASTLPC_VER_BAD;
671 
672 	if (bmc_ver_cur < host_ver_cur)
673 		return bmc_ver_cur;
674 
675 	return host_ver_cur;
676 }
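
/*
 * For example, a BMC advertising [min 1, cur 3] and a host advertising
 * [min 1, cur 2] negotiate version 2 (the lower of the two "cur" values).
 * If the ranges don't intersect at all - say the host only supports [4, 4]
 * against a BMC offering [1, 3] - the result is ASTLPC_VER_BAD and channel
 * initialisation fails.
 */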
677 
678 static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
679 {
680 	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
681 	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
682 	uint16_t bmc_ver_min, bmc_ver_cur, negotiated;
683 	struct mctp_lpcmap_hdr hdr;
684 	uint8_t status;
685 	int rc;
686 
687 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
688 	if (rc) {
689 		mctp_prwarn("KCS status read failed");
690 		return rc;
691 	}
692 
693 	astlpc->kcs_status = status;
694 
695 	if (!(status & KCS_STATUS_BMC_READY))
696 		return -EHOSTDOWN;
697 
698 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
699 
700 	bmc_ver_min = be16toh(hdr.bmc_ver_min);
701 	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);
702 
703 	/* Calculate the expected value of negotiated_ver */
704 	negotiated = mctp_astlpc_negotiate_version(
705 		bmc_ver_min, bmc_ver_cur, ASTLPC_VER_MIN, ASTLPC_VER_CUR);
706 	if (!negotiated) {
707 		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
708 		return -EINVAL;
709 	}
710 
711 	/* Assign protocol ops so we can calculate the packet buffer sizes */
712 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
713 	astlpc->proto = &astlpc_protocol_version[negotiated];
714 
715 	/* Negotiate packet buffers in v2 style if the BMC supports it */
716 	if (negotiated >= 2) {
717 		rc = mctp_astlpc_negotiate_layout_host(astlpc);
718 		if (rc < 0)
719 			return rc;
720 	}
721 
722 	/* Advertise the host's supported protocol versions */
723 	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
724 			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
725 			      sizeof(ver_min_be));
726 
727 	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
728 			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
729 			      sizeof(ver_cur_be));
730 
731 	/* Send channel init command */
732 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
733 	if (rc) {
734 		astlpc_prwarn(astlpc, "KCS write failed");
735 	}
736 
737 	/*
738 	 * Configure the host so `astlpc->proto->version == 0` holds until we
739 	 * receive a subsequent status update from the BMC. Until then,
740 	 * `astlpc->proto->version == 0` indicates that we're yet to complete
741 	 * the channel initialisation handshake.
742 	 *
743 	 * When the BMC provides a status update with KCS_STATUS_CHANNEL_ACTIVE
744 	 * set we will assign the appropriate protocol ops struct in accordance
745 	 * with `negotiated_ver`.
746 	 */
747 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_BAD];
748 
749 	return rc;
750 }
751 
752 static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
753 {
754 	struct mctp_binding_astlpc *astlpc =
755 		container_of(b, struct mctp_binding_astlpc, binding);
756 
757 	return mctp_astlpc_init_host(astlpc);
758 }
759 
760 static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
761 				    uint8_t status, bool is_write)
762 {
763 	bool is_bmc;
764 	bool ready_state;
765 	uint8_t flag;
766 
767 	is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
768 	flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
769 	ready_state = is_write ? 0 : 1;
770 
771 	return !!(status & flag) == ready_state;
772 }
773 
774 static inline bool
775 mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
776 {
777 	return __mctp_astlpc_kcs_ready(astlpc, status, false);
778 }
779 
780 static inline bool
781 mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
782 {
783 	return __mctp_astlpc_kcs_ready(astlpc, status, true);
784 }
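
/*
 * The flag selection above reduces to:
 *
 *   BMC,  read:  ready when IBF is set   (the host has written IDR)
 *   BMC,  write: ready when OBF is clear (the host has read the last ODR)
 *   host, read:  ready when OBF is set   (the BMC has written ODR)
 *   host, write: ready when IBF is clear (the BMC has read the last IDR)
 */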
785 
786 static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
787 				uint8_t data)
788 {
789 	uint8_t status;
790 	int rc;
791 
792 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
793 	if (rc) {
794 		astlpc_prwarn(astlpc, "KCS status read failed");
795 		return -EIO;
796 	}
797 	if (!mctp_astlpc_kcs_write_ready(astlpc, status))
798 		return -EBUSY;
799 
800 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
801 	if (rc) {
802 		astlpc_prwarn(astlpc, "KCS data write failed");
803 		return -EIO;
804 	}
805 
806 	return 0;
807 }
808 
809 static int mctp_binding_astlpc_tx(struct mctp_binding *b,
810 				  struct mctp_pktbuf *pkt)
811 {
812 	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
813 	uint32_t len, len_be;
814 	struct mctp_hdr *hdr;
815 	int rc;
816 
817 	hdr = mctp_pktbuf_hdr(pkt);
818 	len = mctp_pktbuf_size(pkt);
819 
820 	astlpc_prdebug(astlpc,
821 		       "%s: Transmitting %" PRIu32
822 		       "-byte packet (%hhu, %hhu, 0x%hhx)",
823 		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);
824 
825 	if (len > astlpc->proto->body_size(astlpc->layout.tx.size)) {
826 		astlpc_prwarn(astlpc, "invalid TX len %" PRIu32 ": %" PRIu32,
827 			      len,
828 			      astlpc->proto->body_size(astlpc->layout.tx.size));
829 		return -EMSGSIZE;
830 	}
831 
832 	mctp_binding_set_tx_enabled(b, false);
833 
834 	len_be = htobe32(len);
835 	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
836 			      sizeof(len_be));
837 
838 	astlpc->proto->pktbuf_protect(pkt);
839 	len = mctp_pktbuf_size(pkt);
840 
841 	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);
842 
843 	astlpc->layout.tx.state = buffer_state_prepared;
844 
845 	rc = mctp_astlpc_kcs_send(astlpc, 0x1);
846 	if (!rc)
847 		astlpc->layout.tx.state = buffer_state_released;
848 
849 	return rc == -EBUSY ? 0 : rc;
850 }
851 
852 static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
853 					  struct mctp_astlpc_layout *layout)
854 {
855 	uint32_t low, high, limit, rpkt;
856 
857 	/* Derive the largest MTU the BMC _can_ support */
858 	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
859 	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
860 	limit = high - low;
861 
862 	/* Determine the largest MTU the BMC _wants_ to support */
863 	if (astlpc->requested_mtu) {
864 		uint32_t rmtu = astlpc->requested_mtu;
865 
866 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
867 		limit = MIN(limit, rpkt);
868 	}
869 
870 	/* Determine the accepted MTU, applied both directions by convention */
871 	rpkt = MIN(limit, layout->tx.size);
872 	return MCTP_BODY_SIZE(astlpc->proto->body_size(rpkt));
873 }
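
/*
 * For example (protocol v3): if the BMC's own layout allows packet buffers
 * of up to limit = 0x7ff7c bytes and the host proposes an Rx buffer of
 * 1024 + 4 + 4 + 4 = 1036 bytes (a 1024-byte MTU), then
 * rpkt = MIN(0x7ff7c, 1036) = 1036 and the negotiated MTU becomes
 * (1036 - 8) - 4 = 1024, applied to both directions by the caller below.
 */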
874 
875 static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
876 {
877 	struct mctp_astlpc_layout proposed, pending;
878 	uint32_t sz, mtu;
879 	int rc;
880 
881 	/* Do we have a valid protocol version? */
882 	if (!astlpc->proto->version)
883 		return -EINVAL;
884 
885 	/* Extract the host's proposed layout */
886 	rc = mctp_astlpc_layout_read(astlpc, &proposed);
887 	if (rc < 0)
888 		return rc;
889 
890 	/* Do we have a reasonable layout? */
891 	if (!mctp_astlpc_layout_validate(astlpc, &proposed))
892 		return -EINVAL;
893 
894 	/* Negotiate the MTU */
895 	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
896 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(mtu));
897 
898 	/*
899 	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
900 	 * functions
901 	 */
902 	pending = astlpc->layout;
903 	pending.tx.size = sz;
904 	pending.rx.size = sz;
905 
906 	if (mctp_astlpc_layout_validate(astlpc, &pending)) {
907 		/* We found a sensible Rx MTU, so honour it */
908 		astlpc->layout = pending;
909 
910 		/* Enforce the negotiated MTU */
911 		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
912 		if (rc < 0)
913 			return rc;
914 
915 		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
916 			      mtu);
917 	} else {
918 		astlpc_prwarn(astlpc, "MTU negotiation failed");
919 		return -EINVAL;
920 	}
921 
922 	if (astlpc->proto->version >= 2)
923 		astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);
924 
925 	return 0;
926 }
927 
928 static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
929 {
930 	uint16_t negotiated, negotiated_be;
931 	struct mctp_lpcmap_hdr hdr;
932 	uint8_t status;
933 	int rc;
934 
935 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
936 
937 	/* Version negotiation */
938 	negotiated =
939 		mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
940 					      be16toh(hdr.host_ver_min),
941 					      be16toh(hdr.host_ver_cur));
942 
943 	/* MTU negotiation requires knowing which protocol we'll use */
944 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
945 	astlpc->proto = &astlpc_protocol_version[negotiated];
946 
947 	/* Host Rx MTU negotiation: Failure terminates channel init */
948 	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
949 	if (rc < 0)
950 		negotiated = ASTLPC_VER_BAD;
951 
952 	/* Populate the negotiated version */
953 	negotiated_be = htobe16(negotiated);
954 	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
955 			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
956 			      sizeof(negotiated_be));
957 
958 	/* Track buffer ownership */
959 	astlpc->layout.tx.state = buffer_state_acquired;
960 	astlpc->layout.rx.state = buffer_state_released;
961 
962 	/* Finalise the configuration */
963 	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
964 	if (negotiated > 0) {
965 		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
966 			      negotiated);
967 		status |= KCS_STATUS_CHANNEL_ACTIVE;
968 	} else {
969 		astlpc_prerr(astlpc, "Failed to initialise channel");
970 	}
971 
972 	mctp_astlpc_kcs_set_status(astlpc, status);
973 
974 	mctp_binding_set_tx_enabled(&astlpc->binding,
975 				    status & KCS_STATUS_CHANNEL_ACTIVE);
976 }
977 
978 static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
979 {
980 	struct mctp_pktbuf *pkt;
981 	struct mctp_hdr *hdr;
982 	uint32_t body, packet;
983 
984 	mctp_astlpc_lpc_read(astlpc, &body, astlpc->layout.rx.offset,
985 			     sizeof(body));
986 	body = be32toh(body);
987 
988 	if (body > astlpc->proto->body_size(astlpc->layout.rx.size)) {
989 		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
990 		return;
991 	}
992 
993 	if ((size_t)body > astlpc->binding.pkt_size) {
994 		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
995 		return;
996 	}
997 
998 	/* Eliminate the medium-specific header that we just read */
999 	packet = astlpc->proto->packet_size(body) - 4;
1000 	pkt = mctp_pktbuf_alloc(&astlpc->binding, packet);
1001 	if (!pkt) {
1002 		astlpc_prwarn(astlpc, "unable to allocate pktbuf len 0x%x",
1003 			      packet);
1004 		return;
1005 	}
1006 
1007 	/*
1008 	 * Read payload and medium-specific trailer from immediately after the
1009 	 * medium-specific header.
1010 	 */
1011 	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
1012 			     astlpc->layout.rx.offset + 4, packet);
1013 
1014 	astlpc->layout.rx.state = buffer_state_prepared;
1015 
1016 	/* Inform the other side of the MCTP interface that we have read
1017 	 * the packet off the bus before handling the contents of the packet.
1018 	 */
1019 	if (!mctp_astlpc_kcs_send(astlpc, 0x2))
1020 		astlpc->layout.rx.state = buffer_state_released;
1021 
1022 	hdr = mctp_pktbuf_hdr(pkt);
1023 	if (hdr->ver != 1) {
1024 		mctp_pktbuf_free(pkt);
1025 		astlpc_prdebug(astlpc, "Dropped packet with invalid version");
1026 		return;
1027 	}
1028 
1029 	/*
1030 	 * v3 will validate the CRC32 in the medium-specific trailer and adjust
1031 	 * the packet size accordingly. On older protocols validation is a no-op
1032 	 * that always returns true.
1033 	 */
1034 	if (astlpc->proto->pktbuf_validate(pkt)) {
1035 		mctp_bus_rx(&astlpc->binding, pkt);
1036 	} else {
1037 		/* TODO: Drop any associated assembly */
1038 		mctp_pktbuf_free(pkt);
1039 		astlpc_prdebug(astlpc, "Dropped corrupt packet");
1040 	}
1041 }
1042 
1043 static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
1044 {
1045 	astlpc->layout.tx.state = buffer_state_acquired;
1046 	mctp_binding_set_tx_enabled(&astlpc->binding, true);
1047 }
1048 
1049 static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
1050 {
1051 	struct mctp_astlpc_layout layout;
1052 	uint16_t negotiated;
1053 	int rc;
1054 
1055 	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
1056 				  offsetof(struct mctp_lpcmap_hdr,
1057 					   negotiated_ver),
1058 				  sizeof(negotiated));
1059 	if (rc < 0)
1060 		return rc;
1061 
1062 	negotiated = be16toh(negotiated);
1063 	astlpc_prerr(astlpc, "Version negotiation got: %u", negotiated);
1064 
1065 	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
1066 	    negotiated > ASTLPC_VER_CUR) {
1067 		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
1068 			     negotiated);
1069 		return -EINVAL;
1070 	}
1071 
1072 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
1073 	astlpc->proto = &astlpc_protocol_version[negotiated];
1074 
1075 	rc = mctp_astlpc_layout_read(astlpc, &layout);
1076 	if (rc < 0)
1077 		return rc;
1078 
1079 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
1080 		mctp_prerr("BMC proposed invalid buffer parameters");
1081 		return -EINVAL;
1082 	}
1083 
1084 	astlpc->layout = layout;
1085 
1086 	if (negotiated >= 2)
1087 		astlpc->binding.pkt_size =
1088 			astlpc->proto->body_size(astlpc->layout.tx.size);
1089 
1090 	/* Track buffer ownership */
1091 	astlpc->layout.tx.state = buffer_state_acquired;
1092 	astlpc->layout.rx.state = buffer_state_released;
1093 
1094 	return 0;
1095 }
1096 
1097 static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
1098 				      uint8_t status)
1099 {
1100 	uint8_t updated;
1101 	int rc = 0;
1102 
1103 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
1104 
1105 	updated = astlpc->kcs_status ^ status;
1106 
1107 	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
1108 		       status, updated);
1109 
1110 	if (updated & KCS_STATUS_BMC_READY) {
1111 		if (status & KCS_STATUS_BMC_READY) {
1112 			astlpc->kcs_status = status;
1113 			return astlpc->binding.start(&astlpc->binding);
1114 		} else {
1115 			/* Shut down the channel */
1116 			astlpc->layout.rx.state = buffer_state_idle;
1117 			astlpc->layout.tx.state = buffer_state_idle;
1118 			mctp_binding_set_tx_enabled(&astlpc->binding, false);
1119 		}
1120 	}
1121 
1122 	if (astlpc->proto->version == 0 ||
1123 	    updated & KCS_STATUS_CHANNEL_ACTIVE) {
1124 		bool enable;
1125 
1126 		astlpc->layout.rx.state = buffer_state_idle;
1127 		astlpc->layout.tx.state = buffer_state_idle;
1128 		rc = mctp_astlpc_finalise_channel(astlpc);
1129 		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;
1130 		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
1131 	}
1132 
1133 	astlpc->kcs_status = status;
1134 
1135 	return rc;
1136 }
1137 
1138 int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
1139 {
1140 	uint8_t status, data;
1141 	int rc;
1142 
1143 	if (astlpc->layout.rx.state == buffer_state_prepared)
1144 		if (!mctp_astlpc_kcs_send(astlpc, 0x2))
1145 			astlpc->layout.rx.state = buffer_state_released;
1146 
1147 	if (astlpc->layout.tx.state == buffer_state_prepared)
1148 		if (!mctp_astlpc_kcs_send(astlpc, 0x1))
1149 			astlpc->layout.tx.state = buffer_state_released;
1150 
1151 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
1152 	if (rc) {
1153 		astlpc_prwarn(astlpc, "KCS read error");
1154 		return -1;
1155 	}
1156 
1157 	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);
1158 
1159 	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
1160 		return 0;
1161 
1162 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
1163 	if (rc) {
1164 		astlpc_prwarn(astlpc, "KCS data read error");
1165 		return -1;
1166 	}
1167 
1168 	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);
1169 
1170 	if (!astlpc->proto->version && !(data == 0x0 || data == 0xff)) {
1171 		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
1172 			      data);
1173 		return 0;
1174 	}
1175 
1176 	switch (data) {
1177 	case 0x0:
1178 		mctp_astlpc_init_channel(astlpc);
1179 		break;
1180 	case 0x1:
1181 		if (astlpc->layout.rx.state != buffer_state_released) {
1182 			astlpc_prerr(
1183 				astlpc,
1184 				"Protocol error: Invalid Rx buffer state for event %d: %d\n",
1185 				data, astlpc->layout.rx.state);
1186 			return 0;
1187 		}
1188 		mctp_astlpc_rx_start(astlpc);
1189 		break;
1190 	case 0x2:
1191 		if (astlpc->layout.tx.state != buffer_state_released) {
1192 			astlpc_prerr(
1193 				astlpc,
1194 				"Protocol error: Invalid Tx buffer state for event %d: %d\n",
1195 				data, astlpc->layout.tx.state);
1196 			return 0;
1197 		}
1198 		mctp_astlpc_tx_complete(astlpc);
1199 		break;
1200 	case 0xff:
1201 		/* No responsibilities for the BMC on 0xff */
1202 		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1203 			rc = mctp_astlpc_update_channel(astlpc, status);
1204 			if (rc < 0)
1205 				return rc;
1206 		}
1207 		break;
1208 	default:
1209 		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
1210 	}
1211 
1212 	/* Handle silent loss of bmc-ready */
1213 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1214 		if (!(status & KCS_STATUS_BMC_READY && data == 0xff))
1215 			return mctp_astlpc_update_channel(astlpc, status);
1216 	}
1217 
1218 	return rc;
1219 }
1220 
1221 /* allocate and basic initialisation */
1222 static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
1223 						      uint32_t mtu)
1224 {
1225 	struct mctp_binding_astlpc *astlpc;
1226 
1227 	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
1228 	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));
1229 
1230 	astlpc = __mctp_alloc(sizeof(*astlpc));
1231 	if (!astlpc)
1232 		return NULL;
1233 
1234 	memset(astlpc, 0, sizeof(*astlpc));
1235 	astlpc->mode = mode;
1236 	astlpc->lpc_map = NULL;
1237 	astlpc->layout.rx.state = buffer_state_idle;
1238 	astlpc->layout.tx.state = buffer_state_idle;
1239 	astlpc->requested_mtu = mtu;
1240 	astlpc->binding.name = "astlpc";
1241 	astlpc->binding.version = 1;
1242 	astlpc->binding.pkt_size =
1243 		MCTP_PACKET_SIZE(mtu > MCTP_BTU ? mtu : MCTP_BTU);
1244 	astlpc->binding.pkt_header = 4;
1245 	astlpc->binding.pkt_trailer = 4;
1246 	astlpc->binding.tx = mctp_binding_astlpc_tx;
1247 	if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1248 		astlpc->binding.start = mctp_binding_astlpc_start_bmc;
1249 	else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
1250 		astlpc->binding.start = mctp_binding_astlpc_start_host;
1251 	else {
1252 		astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode);
1253 		__mctp_free(astlpc);
1254 		return NULL;
1255 	}
1256 
1257 	return astlpc;
1258 }
1259 
1260 struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
1261 {
1262 	return &b->binding;
1263 }
1264 
1265 struct mctp_binding_astlpc *
1266 mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
1267 		 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
1268 {
1269 	struct mctp_binding_astlpc *astlpc;
1270 
1271 	if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
1272 	      mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
1273 		mctp_prerr("Unknown binding mode: %u", mode);
1274 		return NULL;
1275 	}
1276 
1277 	astlpc = __mctp_astlpc_init(mode, mtu);
1278 	if (!astlpc)
1279 		return NULL;
1280 
1281 	memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
1282 	astlpc->ops_data = ops_data;
1283 	astlpc->lpc_map = lpc_map;
1284 	astlpc->mode = mode;
1285 
1286 	return astlpc;
1287 }
1288 
1289 struct mctp_binding_astlpc *
1290 mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
1291 		     void *lpc_map)
1292 {
1293 	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
1294 				ops, ops_data);
1295 }
1296 
1297 void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
1298 {
1299 	/* Clear channel-active and bmc-ready */
1300 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1301 		mctp_astlpc_kcs_set_status(astlpc, 0);
1302 	__mctp_free(astlpc);
1303 }
1304 
1305 #ifdef MCTP_HAVE_FILEIO
1306 
1307 static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
1308 {
1309 	struct aspeed_lpc_ctrl_mapping map = {
1310 		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
1311 		.window_id = 0, /* There's only one */
1312 		.flags = 0,
1313 		.addr = 0,
1314 		.offset = 0,
1315 		.size = 0
1316 	};
1317 	void *lpc_map_base;
1318 	int fd, rc;
1319 
1320 	fd = open(lpc_path, O_RDWR | O_SYNC);
1321 	if (fd < 0) {
1322 		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
1323 		return -1;
1324 	}
1325 
1326 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
1327 	if (rc) {
1328 		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
1329 		close(fd);
1330 		return -1;
1331 	}
1332 
1333 	/*
1334 	 * 🚨🚨🚨
1335 	 *
1336 	 * Decouple ourselves from hiomapd[1] (another user of the FW2AHB) by
1337 	 * mapping the FW2AHB to the reserved memory here as well.
1338 	 *
1339 	 * It's not possible to use the MCTP ASTLPC binding on machines that
1340 	 * need the FW2AHB bridge mapped anywhere except to the reserved memory
1341 	 * (e.g. the host SPI NOR).
1342 	 *
1343 	 * [1] https://github.com/openbmc/hiomapd/
1344 	 *
1345 	 * 🚨🚨🚨
1346 	 *
1347 	 * The following calculation must align with what's going on in
1348 	 * hiomapd's lpc.c so as not to disrupt its behaviour:
1349 	 *
1350 	 * https://github.com/openbmc/hiomapd/blob/5ff50e3cbd7702aefc185264e4adfb9952040575/lpc.c#L68
1351 	 *
1352 	 * 🚨🚨🚨
1353 	 */
1354 
1355 	/* Map the reserved memory at the top of the 28-bit LPC firmware address space */
1356 	map.addr = 0x0FFFFFFF & -map.size;
1357 	astlpc_prinfo(
1358 		astlpc,
1359 		"Configuring FW2AHB to map reserved memory at 0x%08x for 0x%x in the LPC FW cycle address-space",
1360 		map.addr, map.size);
1361 
1362 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
1363 	if (rc) {
1364 		astlpc_prwarn(astlpc,
1365 			      "Failed to map FW2AHB to reserved memory");
1366 		close(fd);
1367 		return -1;
1368 	}
1369 
1370 	/* Map the reserved memory into our address space */
1371 	lpc_map_base =
1372 		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1373 	if (lpc_map_base == MAP_FAILED) {
1374 		astlpc_prwarn(astlpc, "LPC mmap failed");
1375 		rc = -1;
1376 	} else {
1377 		astlpc->lpc_map = lpc_map_base + map.size - LPC_WIN_SIZE;
1378 	}
1379 
1380 	close(fd);
1381 
1382 	return rc;
1383 }
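
/*
 * As an example of the mapping arithmetic above: if GET_SIZE reports a
 * 64MiB reserved-memory region (map.size = 0x04000000), then
 * map.addr = 0x0fffffff & -0x04000000 = 0x0c000000, i.e. the region occupies
 * the top of the 28-bit LPC FW space, and lpc_map points at the final 1MiB
 * (LPC_WIN_SIZE) of the mmap'd region.
 */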
1384 
1385 static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc)
1386 {
1387 	astlpc->kcs_fd = open(kcs_path, O_RDWR);
1388 	if (astlpc->kcs_fd < 0)
1389 		return -1;
1390 
1391 	return 0;
1392 }
1393 
1394 static int __mctp_astlpc_fileio_kcs_read(void *arg,
1395 					 enum mctp_binding_astlpc_kcs_reg reg,
1396 					 uint8_t *val)
1397 {
1398 	struct mctp_binding_astlpc *astlpc = arg;
1399 	off_t offset = reg;
1400 	int rc;
1401 
1402 	rc = pread(astlpc->kcs_fd, val, 1, offset);
1403 
1404 	return rc == 1 ? 0 : -1;
1405 }
1406 
1407 static int __mctp_astlpc_fileio_kcs_write(void *arg,
1408 					  enum mctp_binding_astlpc_kcs_reg reg,
1409 					  uint8_t val)
1410 {
1411 	struct mctp_binding_astlpc *astlpc = arg;
1412 	off_t offset = reg;
1413 	int rc;
1414 
1415 	rc = pwrite(astlpc->kcs_fd, &val, 1, offset);
1416 
1417 	return rc == 1 ? 0 : -1;
1418 }
1419 
1420 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc,
1421 			    struct pollfd *pollfd)
1422 {
1423 	bool release;
1424 
1425 	pollfd->fd = astlpc->kcs_fd;
1426 	pollfd->events = 0;
1427 
1428 	release = astlpc->layout.rx.state == buffer_state_prepared ||
1429 		  astlpc->layout.tx.state == buffer_state_prepared;
1430 
1431 	pollfd->events = release ? POLLOUT : POLLIN;
1432 
1433 	return 0;
1434 }
1435 
1436 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
1437 {
1438 	struct mctp_binding_astlpc *astlpc;
1439 	int rc;
1440 
1441 	/*
1442 	 * If we're doing file IO then we're very likely not running
1443 	 * freestanding, so let's assume that we're on the BMC side.
1444 	 *
1445 	 * Requesting an MTU of 0 requests the largest possible MTU, whatever
1446 	 * value that might take.
1447 	 */
1448 	astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0);
1449 	if (!astlpc)
1450 		return NULL;
1451 
1452 	/* Set internal operations for KCS. We use direct access to the LPC
1453 	 * map area */
1454 	astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
1455 	astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
1456 	astlpc->ops_data = astlpc;
1457 
1458 	rc = mctp_astlpc_init_fileio_lpc(astlpc);
1459 	if (rc) {
1460 		free(astlpc);
1461 		return NULL;
1462 	}
1463 
1464 	rc = mctp_astlpc_init_fileio_kcs(astlpc);
1465 	if (rc) {
1466 		free(astlpc);
1467 		return NULL;
1468 	}
1469 
1470 	return astlpc;
1471 }
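
/*
 * A rough sketch of a BMC-side consumer of this binding (illustrative only:
 * error handling is omitted and the EID of 8 is arbitrary):
 *
 *   struct mctp_binding_astlpc *astlpc;
 *   struct pollfd pollfd;
 *   struct mctp *mctp;
 *
 *   mctp = mctp_init();
 *   astlpc = mctp_astlpc_init_fileio();
 *   mctp_register_bus(mctp, mctp_binding_astlpc_core(astlpc), 8);
 *
 *   for (;;) {
 *           mctp_astlpc_init_pollfd(astlpc, &pollfd);
 *           poll(&pollfd, 1, -1);
 *           if (pollfd.revents)
 *                   mctp_astlpc_poll(astlpc);
 *   }
 */
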
1472 #else
1473 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
1474 {
1475 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1476 	return NULL;
1477 }
1478 
1479 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc __unused,
1480 			    struct pollfd *pollfd __unused)
1481 {
1482 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1483 	return -1;
1484 }
1485 #endif
1486