xref: /openbmc/libmctp/astlpc.c (revision 8f53d631)
1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */
2 
3 #if HAVE_CONFIG_H
4 #include "config.h"
5 #endif
6 
7 #if HAVE_ENDIAN_H
8 #include <endian.h>
9 #endif
10 
11 #include <assert.h>
12 #include <err.h>
13 #include <errno.h>
14 #include <inttypes.h>
15 #include <stdbool.h>
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #define pr_fmt(x) "astlpc: " x
20 
21 #include "container_of.h"
22 #include "crc32.h"
23 #include "libmctp.h"
24 #include "libmctp-alloc.h"
25 #include "libmctp-log.h"
26 #include "libmctp-astlpc.h"
27 #include "range.h"
28 
29 #ifdef MCTP_HAVE_FILEIO
30 
31 #include <unistd.h>
32 #include <fcntl.h>
33 #include <poll.h>
34 #include <sys/ioctl.h>
35 #include <sys/mman.h>
36 #include <linux/aspeed-lpc-ctrl.h>
37 
38 /* kernel interface */
39 static const char *kcs_path = "/dev/mctp0";
40 static const char *lpc_path = "/dev/aspeed-lpc-ctrl";
41 
42 #endif
43 
44 struct mctp_astlpc_buffer {
45 	uint32_t offset;
46 	uint32_t size;
47 };
48 
49 struct mctp_astlpc_layout {
50 	struct mctp_astlpc_buffer rx;
51 	struct mctp_astlpc_buffer tx;
52 };
53 
54 struct mctp_astlpc_protocol {
55 	uint16_t version;
56 	uint32_t (*packet_size)(uint32_t body);
57 	uint32_t (*body_size)(uint32_t packet);
58 	void (*pktbuf_protect)(struct mctp_pktbuf *pkt);
59 	bool (*pktbuf_validate)(struct mctp_pktbuf *pkt);
60 };
61 
62 struct mctp_binding_astlpc {
63 	struct mctp_binding	binding;
64 
65 	void *lpc_map;
66 	struct mctp_astlpc_layout layout;
67 
68 	uint8_t mode;
69 	uint32_t requested_mtu; /* 0 requests the largest possible MTU */
70 
71 	const struct mctp_astlpc_protocol *proto;
72 
73 	/* direct ops data */
74 	struct mctp_binding_astlpc_ops ops;
75 	void *ops_data;
76 
77 	/* fileio ops data */
78 	int kcs_fd;
79 	uint8_t kcs_status;
80 };
81 
82 #define binding_to_astlpc(b) \
83 	container_of(b, struct mctp_binding_astlpc, binding)
84 
85 #define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
86 	do {                                                                   \
87 		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
88 		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
89 			   ##__VA_ARGS__);                                     \
90 	} while (0)
91 
92 #define astlpc_prerr(ctx, fmt, ...)                                            \
93 	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
94 #define astlpc_prwarn(ctx, fmt, ...)                                           \
95 	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
96 #define astlpc_prinfo(ctx, fmt, ...)                                           \
97 	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
98 #define astlpc_prdebug(ctx, fmt, ...)                                          \
99 	astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)
100 
101 /* clang-format off */
102 #define ASTLPC_MCTP_MAGIC	0x4d435450
103 #define ASTLPC_VER_BAD	0
104 #define ASTLPC_VER_MIN	1
105 
106 /* Support testing of new binding protocols */
107 #ifndef ASTLPC_VER_CUR
108 #define ASTLPC_VER_CUR	3
109 #endif
110 /* clang-format on */
111 
112 #ifndef ARRAY_SIZE
113 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
114 #endif
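
/*
 * Medium-specific framing in the shared window: every protocol version
 * prefixes the MCTP packet with a 4-byte length word, and version 3
 * additionally appends a 4-byte CRC32 over the MCTP header and payload. The
 * helpers below convert between the MCTP packet ("body") size and the framed
 * ("packet") size on the medium.
 */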
115 
116 static uint32_t astlpc_packet_size_v1(uint32_t body)
117 {
118 	assert((body + 4) > body);
119 
120 	return body + 4;
121 }
122 
123 static uint32_t astlpc_body_size_v1(uint32_t packet)
124 {
125 	assert((packet - 4) < packet);
126 
127 	return packet - 4;
128 }
129 
130 static void astlpc_pktbuf_protect_v1(struct mctp_pktbuf *pkt)
131 {
132 	(void)pkt;
133 }
134 
135 static bool astlpc_pktbuf_validate_v1(struct mctp_pktbuf *pkt)
136 {
137 	(void)pkt;
138 	return true;
139 }
140 
141 static uint32_t astlpc_packet_size_v3(uint32_t body)
142 {
143 	assert((body + 4 + 4) > body);
144 
145 	return body + 4 + 4;
146 }
147 
148 static uint32_t astlpc_body_size_v3(uint32_t packet)
149 {
150 	assert((packet - 4 - 4) < packet);
151 
152 	return packet - 4 - 4;
153 }
154 
155 static void astlpc_pktbuf_protect_v3(struct mctp_pktbuf *pkt)
156 {
157 	uint32_t code;
158 
159 	code = htobe32(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt)));
160 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
161 	mctp_pktbuf_push(pkt, &code, 4);
162 }
163 
164 static bool astlpc_pktbuf_validate_v3(struct mctp_pktbuf *pkt)
165 {
166 	uint32_t code;
167 	void *check;
168 
169 	code = be32toh(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt) - 4));
170 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
171 	check = mctp_pktbuf_pop(pkt, 4);
172 	return check && !memcmp(&code, check, 4);
173 }
174 
175 static const struct mctp_astlpc_protocol astlpc_protocol_version[] = {
176 	[0] = {
177 		.version = 0,
178 		.packet_size = NULL,
179 		.body_size = NULL,
180 		.pktbuf_protect = NULL,
181 		.pktbuf_validate = NULL,
182 	},
183 	[1] = {
184 		.version = 1,
185 		.packet_size = astlpc_packet_size_v1,
186 		.body_size = astlpc_body_size_v1,
187 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
188 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
189 	},
190 	[2] = {
191 		.version = 2,
192 		.packet_size = astlpc_packet_size_v1,
193 		.body_size = astlpc_body_size_v1,
194 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
195 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
196 	},
197 	[3] = {
198 		.version = 3,
199 		.packet_size = astlpc_packet_size_v3,
200 		.body_size = astlpc_body_size_v3,
201 		.pktbuf_protect = astlpc_pktbuf_protect_v3,
202 		.pktbuf_validate = astlpc_pktbuf_validate_v3,
203 	},
204 };
205 
206 struct mctp_lpcmap_hdr {
207 	uint32_t magic;
208 
209 	uint16_t bmc_ver_min;
210 	uint16_t bmc_ver_cur;
211 	uint16_t host_ver_min;
212 	uint16_t host_ver_cur;
213 	uint16_t negotiated_ver;
214 	uint16_t pad0;
215 
216 	struct {
217 		uint32_t rx_offset;
218 		uint32_t rx_size;
219 		uint32_t tx_offset;
220 		uint32_t tx_size;
221 	} layout;
222 } __attribute__((packed));
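
/* All multi-byte fields in the window header are stored big-endian. */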
223 
224 static const uint32_t control_size = 0x100;
225 
226 #define LPC_WIN_SIZE                (1 * 1024 * 1024)
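
/*
 * The window is carved up as the control region (the header above, padded to
 * control_size bytes) followed by the two packet buffers, whose offsets and
 * sizes are advertised in the header's layout fields.
 */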
227 
228 #define KCS_STATUS_BMC_READY		0x80
229 #define KCS_STATUS_CHANNEL_ACTIVE	0x40
230 #define KCS_STATUS_IBF			0x02
231 #define KCS_STATUS_OBF			0x01
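
/*
 * IBF and OBF are the hardware-managed KCS flags (data pending from the host
 * and from the BMC respectively); BMC_READY and CHANNEL_ACTIVE are
 * software-defined bits that the BMC publishes via the status register.
 */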
232 
233 static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
234 					enum mctp_binding_astlpc_kcs_reg reg,
235 					uint8_t val)
236 {
237 	return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
238 }
239 
240 static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
241 				       enum mctp_binding_astlpc_kcs_reg reg,
242 				       uint8_t *val)
243 {
244 	return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
245 }
246 
247 static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
248 					const void *buf, long offset,
249 					size_t len)
250 {
251 	astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);
252 
253 	assert(offset >= 0);
254 
255 	/* Indirect access */
256 	if (astlpc->ops.lpc_write) {
257 		void *data = astlpc->ops_data;
258 
259 		return astlpc->ops.lpc_write(data, buf, offset, len);
260 	}
261 
262 	/* Direct mapping */
263 	assert(astlpc->lpc_map);
264 	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);
265 
266 	return 0;
267 }
268 
269 static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
270 				       void *buf, long offset, size_t len)
271 {
272 	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
273 		       offset);
274 
275 	assert(offset >= 0);
276 
277 	/* Indirect access */
278 	if (astlpc->ops.lpc_read) {
279 		void *data = astlpc->ops_data;
280 
281 		return astlpc->ops.lpc_read(data, buf, offset, len);
282 	}
283 
284 	/* Direct mapping */
285 	assert(astlpc->lpc_map);
286 	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);
287 
288 	return 0;
289 }
290 
291 static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
292 				      uint8_t status)
293 {
294 	uint8_t data;
295 	int rc;
296 
297 	/* Since we're setting the status register, we want the other endpoint
298 	 * to be interrupted. However, some hardware may only raise a host-side
299 	 * interrupt on an ODR event.
300 	 * So, write a dummy value of 0xff to ODR, which will ensure that an
301 	 * interrupt is triggered, and can be ignored by the host.
302 	 */
303 	data = 0xff;
304 
305 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
306 	if (rc) {
307 		astlpc_prwarn(astlpc, "KCS status write failed");
308 		return -1;
309 	}
310 
311 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
312 	if (rc) {
313 		astlpc_prwarn(astlpc, "KCS dummy data write failed");
314 		return -1;
315 	}
316 
317 	return 0;
318 }
319 
320 static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
321 				   struct mctp_astlpc_layout *layout)
322 {
323 	struct mctp_lpcmap_hdr hdr;
324 	int rc;
325 
326 	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
327 	if (rc < 0)
328 		return rc;
329 
330 	/* Flip the buffers as the names are defined in terms of the host */
331 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
332 		layout->rx.offset = be32toh(hdr.layout.tx_offset);
333 		layout->rx.size = be32toh(hdr.layout.tx_size);
334 		layout->tx.offset = be32toh(hdr.layout.rx_offset);
335 		layout->tx.size = be32toh(hdr.layout.rx_size);
336 	} else {
337 		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
338 
339 		layout->rx.offset = be32toh(hdr.layout.rx_offset);
340 		layout->rx.size = be32toh(hdr.layout.rx_size);
341 		layout->tx.offset = be32toh(hdr.layout.tx_offset);
342 		layout->tx.size = be32toh(hdr.layout.tx_size);
343 	}
344 
345 	return 0;
346 }
347 
348 static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
349 				    struct mctp_astlpc_layout *layout)
350 {
351 	uint32_t rx_size_be;
352 
353 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
354 		struct mctp_lpcmap_hdr hdr;
355 
356 		/*
357 		 * Flip the buffers as the names are defined in terms of the
358 		 * host
359 		 */
360 		hdr.layout.rx_offset = htobe32(layout->tx.offset);
361 		hdr.layout.rx_size = htobe32(layout->tx.size);
362 		hdr.layout.tx_offset = htobe32(layout->rx.offset);
363 		hdr.layout.tx_size = htobe32(layout->rx.size);
364 
365 		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
366 				offsetof(struct mctp_lpcmap_hdr, layout),
367 				sizeof(hdr.layout));
368 	}
369 
370 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
371 
372 	/*
373 	 * As of v2 we only need to write rx_size - the offsets are controlled
374 	 * by the BMC, as is the BMC's rx_size (host tx_size).
375 	 */
376 	rx_size_be = htobe32(layout->rx.size);
377 	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
378 			offsetof(struct mctp_lpcmap_hdr, layout.rx_size),
379 			sizeof(rx_size_be));
380 }
381 
382 static bool
383 mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc *astlpc,
384 			    const struct mctp_astlpc_buffer *buf,
385 			    const char *name)
386 {
387 	/* Check for overflow */
388 	if (buf->offset + buf->size < buf->offset) {
389 		mctp_prerr(
390 			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
391 			", size: %" PRIu32,
392 			name, buf->offset, buf->size);
393 		return false;
394 	}
395 
396 	/* Check that the buffers are contained within the allocated space */
397 	if (buf->offset + buf->size > LPC_WIN_SIZE) {
398 		mctp_prerr(
399 			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
400 			", size: %" PRIu32,
401 			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
402 			buf->size);
403 		return false;
404 	}
405 
406 	/* Check that the baseline transmission unit is supported */
407 	if (buf->size < astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU))) {
408 		mctp_prerr(
409 			"%s packet buffer too small: Require %" PRIu32 " bytes to support the %u byte baseline transmission unit, found %" PRIu32,
410 			name,
411 			astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU)),
412 			MCTP_BTU, buf->size);
413 		return false;
414 	}
415 
416 	/* Check for overlap with the control space */
417 	if (buf->offset < control_size) {
418 		mctp_prerr(
419 			"%s packet buffer overlaps control region {0x%" PRIx32
420 			", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
421 			name, 0U, control_size, buf->offset, buf->size);
422 		return false;
423 	}
424 
425 	return true;
426 }
427 
428 static bool
429 mctp_astlpc_layout_validate(const struct mctp_binding_astlpc *astlpc,
430 			    const struct mctp_astlpc_layout *layout)
431 {
432 	const struct mctp_astlpc_buffer *rx = &layout->rx;
433 	const struct mctp_astlpc_buffer *tx = &layout->tx;
434 	bool rx_valid, tx_valid;
435 
436 	rx_valid = mctp_astlpc_buffer_validate(astlpc, rx, "Rx");
437 	tx_valid = mctp_astlpc_buffer_validate(astlpc, tx, "Tx");
438 
439 	if (!(rx_valid && tx_valid))
440 		return false;
441 
442 	/* Check that the buffers are disjoint */
443 	if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
444 	    (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
445 		mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
446 			   ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
447 			   rx->offset, rx->size, tx->offset, tx->size);
448 		return false;
449 	}
450 
451 	return true;
452 }
453 
454 static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
455 {
456 	struct mctp_lpcmap_hdr hdr = { 0 };
457 	uint8_t status;
458 	uint32_t sz;
459 
460 	/*
461 	 * The largest buffer size is half of the allocated MCTP space
462 	 * excluding the control space.
463 	 */
464 	sz = ((LPC_WIN_SIZE - control_size) / 2);
465 
466 	/*
467 	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
468 	 * Query Hop in DSP0236 v1.3.0.
469 	 */
470 	sz = MCTP_BODY_SIZE(astlpc->proto->body_size(sz));
471 	sz &= ~0xfUL;
472 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(sz));
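	/*
	 * With the default 1MiB window this gives each packet buffer a little
	 * under 512KiB; the MTU is that, less the medium-specific framing and
	 * the 4-byte MCTP header, rounded down to a 16-byte multiple.
	 */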
473 
474 	if (astlpc->requested_mtu) {
475 		uint32_t rpkt, rmtu;
476 
477 		rmtu = astlpc->requested_mtu;
478 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
479 		sz = MIN(sz, rpkt);
480 	}
481 
482 	/* Flip the buffers as the names are defined in terms of the host */
483 	astlpc->layout.tx.offset = control_size;
484 	astlpc->layout.tx.size = sz;
485 	astlpc->layout.rx.offset =
486 		astlpc->layout.tx.offset + astlpc->layout.tx.size;
487 	astlpc->layout.rx.size = sz;
488 
489 	if (!mctp_astlpc_layout_validate(astlpc, &astlpc->layout)) {
490 		astlpc_prerr(astlpc, "Cannot support an MTU of %" PRIu32, sz);
491 		return -EINVAL;
492 	}
493 
494 	hdr = (struct mctp_lpcmap_hdr){
495 		.magic = htobe32(ASTLPC_MCTP_MAGIC),
496 		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
497 		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),
498 
499 		/* Flip the buffers back, as the header fields describe the
500 		 * layout from the host's point of view */
501 		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
502 		.layout.rx_size = htobe32(astlpc->layout.tx.size),
503 		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
504 		.layout.tx_size = htobe32(astlpc->layout.rx.size),
505 	};
506 
507 	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));
508 
509 	/*
510 	 * Set status indicating that the BMC is now active. Be explicit about
511 	 * clearing OBF; we're reinitialising the binding and so any previous
512 	 * buffer state is irrelevant.
513 	 */
514 	status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF;
515 	return mctp_astlpc_kcs_set_status(astlpc, status);
516 }
517 
518 static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
519 {
520 	struct mctp_binding_astlpc *astlpc =
521 		container_of(b, struct mctp_binding_astlpc, binding);
522 
523 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_CUR];
524 
525 	return mctp_astlpc_init_bmc(astlpc);
526 }
527 
528 static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
529 					 uint16_t bmc_ver_cur,
530 					 uint16_t host_ver_min,
531 					 uint16_t host_ver_cur)
532 {
533 	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
534 		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
535 			   "], [%" PRIu16 ", %" PRIu16 "]",
536 			   bmc_ver_min, bmc_ver_cur, host_ver_min,
537 			   host_ver_cur);
538 		return false;
539 	} else if (bmc_ver_min > bmc_ver_cur) {
540 		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
541 			   "]",
542 			   bmc_ver_min, bmc_ver_cur);
543 		return false;
544 	} else if (host_ver_min > host_ver_cur) {
545 		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
546 			   "]",
547 			   host_ver_min, host_ver_cur);
548 		return false;
549 	} else if ((host_ver_cur < bmc_ver_min) ||
550 		   (host_ver_min > bmc_ver_cur)) {
551 		mctp_prerr(
552 			"Unable to satisfy version negotiation with ranges [%" PRIu16
553 			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
554 			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
555 		return false;
556 	}
557 
558 	return true;
559 }
560 
561 static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
562 {
563 	struct mctp_astlpc_layout layout;
564 	uint32_t rmtu;
565 	uint32_t sz;
566 	int rc;
567 
568 	rc = mctp_astlpc_layout_read(astlpc, &layout);
569 	if (rc < 0)
570 		return rc;
571 
572 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
573 		astlpc_prerr(
574 			astlpc,
575 			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
576 			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
577 			layout.rx.offset, layout.rx.size, layout.tx.offset,
578 			layout.tx.size);
579 		return -EINVAL;
580 	}
581 
582 	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
583 		      astlpc->requested_mtu);
584 
585 	rmtu = astlpc->requested_mtu;
586 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
587 	layout.rx.size = sz;
588 
589 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
590 		astlpc_prerr(
591 			astlpc,
592 			"Generated invalid buffer layout with size %" PRIu32
593 			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
594 			", %" PRIu32 "}",
595 			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
596 			layout.tx.size);
597 		return -EINVAL;
598 	}
599 
600 	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
601 		      astlpc->requested_mtu);
602 
603 	return mctp_astlpc_layout_write(astlpc, &layout);
604 }
605 
606 static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
607 					      uint16_t bmc_ver_cur,
608 					      uint16_t host_ver_min,
609 					      uint16_t host_ver_cur)
610 {
611 	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
612 					  host_ver_min, host_ver_cur))
613 		return ASTLPC_VER_BAD;
614 
615 	if (bmc_ver_cur < host_ver_cur)
616 		return bmc_ver_cur;
617 
618 	return host_ver_cur;
619 }
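
/*
 * For example, a BMC advertising [min 1, cur 3] and a host advertising
 * [min 1, cur 2] negotiate version 2, while a BMC at [2, 3] and a host at
 * [1, 1] have no overlapping version and negotiation fails.
 */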
620 
621 static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
622 {
623 	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
624 	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
625 	uint16_t bmc_ver_min, bmc_ver_cur, negotiated;
626 	struct mctp_lpcmap_hdr hdr;
627 	uint8_t status;
628 	int rc;
629 
630 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
631 	if (rc) {
632 		mctp_prwarn("KCS status read failed");
633 		return rc;
634 	}
635 
636 	astlpc->kcs_status = status;
637 
638 	if (!(status & KCS_STATUS_BMC_READY))
639 		return -EHOSTDOWN;
640 
641 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
642 
643 	bmc_ver_min = be16toh(hdr.bmc_ver_min);
644 	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);
645 
646 	/* Calculate the expected value of negotiated_ver */
647 	negotiated = mctp_astlpc_negotiate_version(bmc_ver_min, bmc_ver_cur,
648 						   ASTLPC_VER_MIN,
649 						   ASTLPC_VER_CUR);
650 	if (!negotiated) {
651 		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
652 		return -EINVAL;
653 	}
654 
655 	/* Assign protocol ops so we can calculate the packet buffer sizes */
656 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
657 	astlpc->proto = &astlpc_protocol_version[negotiated];
658 
659 	/* Negotiate packet buffers in v2 style if the BMC supports it */
660 	if (negotiated >= 2) {
661 		rc = mctp_astlpc_negotiate_layout_host(astlpc);
662 		if (rc < 0)
663 			return rc;
664 	}
665 
666 	/* Advertise the host's supported protocol versions */
667 	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
668 			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
669 			      sizeof(ver_min_be));
670 
671 	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
672 			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
673 			      sizeof(ver_cur_be));
674 
675 	/* Send channel init command */
676 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
677 	if (rc) {
678 		astlpc_prwarn(astlpc, "KCS write failed");
679 	}
680 
681 	/*
682 	 * Configure the host so `astlpc->proto->version == 0` holds until we
683 	 * receive a subsequent status update from the BMC. Until then,
684 	 * `astlpc->proto->version == 0` indicates that we're yet to complete
685 	 * the channel initialisation handshake.
686 	 *
687 	 * When the BMC provides a status update with KCS_STATUS_CHANNEL_ACTIVE
688 	 * set we will assign the appropriate protocol ops struct in accordance
689 	 * with `negotiated_ver`.
690 	 */
691 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_BAD];
692 
693 	return rc;
694 }
695 
696 static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
697 {
698 	struct mctp_binding_astlpc *astlpc =
699 		container_of(b, struct mctp_binding_astlpc, binding);
700 
701 	return mctp_astlpc_init_host(astlpc);
702 }
703 
704 static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
705 				    uint8_t status, bool is_write)
706 {
707 	bool is_bmc;
708 	bool ready_state;
709 	uint8_t flag;
710 
711 	is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
712 	flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
713 	ready_state = is_write ? 0 : 1;
714 
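	/*
	 * This encodes: the BMC may read once IBF is set and write once OBF
	 * is clear; the host may read once OBF is set and write once IBF is
	 * clear.
	 */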
715 	return !!(status & flag) == ready_state;
716 }
717 
718 static inline bool
719 mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
720 {
721 	return __mctp_astlpc_kcs_ready(astlpc, status, false);
722 }
723 
724 static inline bool
725 mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
726 {
727 	return __mctp_astlpc_kcs_ready(astlpc, status, true);
728 }
729 
730 static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
731 		uint8_t data)
732 {
733 	uint8_t status;
734 	int rc;
735 
736 	for (;;) {
737 		rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS,
738 					  &status);
739 		if (rc) {
740 			astlpc_prwarn(astlpc, "KCS status read failed");
741 			return -1;
742 		}
743 		if (mctp_astlpc_kcs_write_ready(astlpc, status))
744 			break;
745 		/* todo: timeout */
746 	}
747 
748 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
749 	if (rc) {
750 		astlpc_prwarn(astlpc, "KCS data write failed");
751 		return -1;
752 	}
753 
754 	return 0;
755 }
756 
757 static int mctp_binding_astlpc_tx(struct mctp_binding *b,
758 		struct mctp_pktbuf *pkt)
759 {
760 	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
761 	uint32_t len, len_be;
762 	struct mctp_hdr *hdr;
763 
764 	hdr = mctp_pktbuf_hdr(pkt);
765 	len = mctp_pktbuf_size(pkt);
766 
767 	astlpc_prdebug(astlpc,
768 		       "%s: Transmitting %" PRIu32
769 		       "-byte packet (%hhu, %hhu, 0x%hhx)",
770 		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);
771 
772 	if (len > astlpc->proto->body_size(astlpc->layout.tx.size)) {
773 		astlpc_prwarn(astlpc, "invalid TX len %" PRIu32 ": %" PRIu32, len,
774 				astlpc->proto->body_size(astlpc->layout.tx.size));
775 		return -1;
776 	}
777 
778 	len_be = htobe32(len);
779 	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
780 			      sizeof(len_be));
781 
782 	astlpc->proto->pktbuf_protect(pkt);
783 	len = mctp_pktbuf_size(pkt);
784 
785 	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);
786 
787 	mctp_binding_set_tx_enabled(b, false);
788 
789 	mctp_astlpc_kcs_send(astlpc, 0x1);
790 
791 	return 0;
792 }
793 
794 static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
795 					  struct mctp_astlpc_layout *layout)
796 {
797 	uint32_t low, high, limit, rpkt;
798 
799 	/* Derive the largest MTU the BMC _can_ support */
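	/*
	 * The buffers were laid out back-to-back after the control region, so
	 * the gap between their offsets is the space available to a single
	 * packet buffer.
	 */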
800 	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
801 	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
802 	limit = high - low;
803 
804 	/* Determine the largest MTU the BMC _wants_ to support */
805 	if (astlpc->requested_mtu) {
806 		uint32_t rmtu = astlpc->requested_mtu;
807 
808 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
809 		limit = MIN(limit, rpkt);
810 	}
811 
812 	/* Determine the accepted MTU, applied in both directions by convention */
813 	rpkt = MIN(limit, layout->tx.size);
814 	return MCTP_BODY_SIZE(astlpc->proto->body_size(rpkt));
815 }
816 
817 static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
818 {
819 	struct mctp_astlpc_layout proposed, pending;
820 	uint32_t sz, mtu;
821 	int rc;
822 
823 	/* Do we have a valid protocol version? */
824 	if (!astlpc->proto->version)
825 		return -EINVAL;
826 
827 	/* Extract the host's proposed layout */
828 	rc = mctp_astlpc_layout_read(astlpc, &proposed);
829 	if (rc < 0)
830 		return rc;
831 
832 	/* Do we have a reasonable layout? */
833 	if (!mctp_astlpc_layout_validate(astlpc, &proposed))
834 		return -EINVAL;
835 
836 	/* Negotiate the MTU */
837 	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
838 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(mtu));
839 
840 	/*
841 	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
842 	 * functions
843 	 */
844 	pending = astlpc->layout;
845 	pending.tx.size = sz;
846 	pending.rx.size = sz;
847 
848 	if (mctp_astlpc_layout_validate(astlpc, &pending)) {
849 		/* We found a sensible Rx MTU, so honour it */
850 		astlpc->layout = pending;
851 
852 		/* Enforce the negotiated MTU */
853 		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
854 		if (rc < 0)
855 			return rc;
856 
857 		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
858 			      mtu);
859 	} else {
860 		astlpc_prwarn(astlpc, "MTU negotiation failed");
861 		return -EINVAL;
862 	}
863 
864 	if (astlpc->proto->version >= 2)
865 		astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);
866 
867 	return 0;
868 }
869 
870 static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
871 {
872 	uint16_t negotiated, negotiated_be;
873 	struct mctp_lpcmap_hdr hdr;
874 	uint8_t status;
875 	int rc;
876 
877 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
878 
879 	/* Version negotiation */
880 	negotiated =
881 		mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
882 					      be16toh(hdr.host_ver_min),
883 					      be16toh(hdr.host_ver_cur));
884 
885 	/* MTU negotiation requires knowing which protocol we'll use */
886 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
887 	astlpc->proto = &astlpc_protocol_version[negotiated];
888 
889 	/* Host Rx MTU negotiation: Failure terminates channel init */
890 	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
891 	if (rc < 0)
892 		negotiated = ASTLPC_VER_BAD;
893 
894 	/* Populate the negotiated version */
895 	negotiated_be = htobe16(negotiated);
896 	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
897 			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
898 			      sizeof(negotiated_be));
899 
900 	/* Finalise the configuration */
901 	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
902 	if (negotiated > 0) {
903 		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
904 			      negotiated);
905 		status |= KCS_STATUS_CHANNEL_ACTIVE;
906 	} else {
907 		astlpc_prerr(astlpc, "Failed to initialise channel");
908 	}
909 
910 	mctp_astlpc_kcs_set_status(astlpc, status);
911 
912 	mctp_binding_set_tx_enabled(&astlpc->binding,
913 				    status & KCS_STATUS_CHANNEL_ACTIVE);
914 }
915 
916 static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
917 {
918 	struct mctp_pktbuf *pkt;
919 	uint32_t body, packet;
920 
921 	mctp_astlpc_lpc_read(astlpc, &body, astlpc->layout.rx.offset,
922 			     sizeof(body));
923 	body = be32toh(body);
924 
925 	if (body > astlpc->proto->body_size(astlpc->layout.rx.size)) {
926 		astlpc_prwarn(astlpc, "invalid RX len 0x%" PRIx32, body);
927 		return;
928 	}
929 
930 	assert(astlpc->binding.pkt_size >= 0);
931 	if (body > (uint32_t)astlpc->binding.pkt_size) {
932 		astlpc_prwarn(astlpc, "invalid RX len 0x%" PRIx32, body);
933 		return;
934 	}
935 
936 	/* Eliminate the medium-specific header that we just read */
937 	packet = astlpc->proto->packet_size(body) - 4;
938 	pkt = mctp_pktbuf_alloc(&astlpc->binding, packet);
939 	if (!pkt) {
940 		astlpc_prwarn(astlpc, "unable to allocate pktbuf len 0x%" PRIx32, packet);
941 		return;
942 	}
943 
944 	/*
945 	 * Read payload and medium-specific trailer from immediately after the
946 	 * medium-specific header.
947 	 */
948 	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
949 			     astlpc->layout.rx.offset + 4, packet);
950 
951 	/* Inform the other side of the MCTP interface that we have read
952 	 * the packet off the bus before handling the contents of the packet.
953 	 */
954 	mctp_astlpc_kcs_send(astlpc, 0x2);
955 
956 	/*
957 	 * v3 will validate the CRC32 in the medium-specific trailer and adjust
958 	 * the packet size accordingly. On older protocols validation is a no-op
959 	 * that always returns true.
960 	 */
961 	if (astlpc->proto->pktbuf_validate(pkt)) {
962 		mctp_bus_rx(&astlpc->binding, pkt);
963 	} else {
964 		/* TODO: Drop any associated assembly */
965 		mctp_pktbuf_free(pkt);
966 		astlpc_prdebug(astlpc, "Dropped corrupt packet");
967 	}
968 }
969 
970 static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
971 {
972 	mctp_binding_set_tx_enabled(&astlpc->binding, true);
973 }
974 
975 static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
976 {
977 	struct mctp_astlpc_layout layout;
978 	uint16_t negotiated;
979 	int rc;
980 
981 	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
982 				  offsetof(struct mctp_lpcmap_hdr,
983 					   negotiated_ver),
984 				  sizeof(negotiated));
985 	if (rc < 0)
986 		return rc;
987 
988 	negotiated = be16toh(negotiated);
989 	astlpc_prinfo(astlpc, "Version negotiation got: %u", negotiated);
990 
991 	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
992 	    negotiated > ASTLPC_VER_CUR) {
993 		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u",
994 			     negotiated);
995 		return -EINVAL;
996 	}
997 
998 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
999 	astlpc->proto = &astlpc_protocol_version[negotiated];
1000 
1001 	rc = mctp_astlpc_layout_read(astlpc, &layout);
1002 	if (rc < 0)
1003 		return rc;
1004 
1005 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
1006 		mctp_prerr("BMC proposed invalid buffer parameters");
1007 		return -EINVAL;
1008 	}
1009 
1010 	astlpc->layout = layout;
1011 
1012 	if (negotiated >= 2)
1013 		astlpc->binding.pkt_size =
1014 			astlpc->proto->body_size(astlpc->layout.tx.size);
1015 
1016 	return 0;
1017 }
1018 
1019 static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
1020 				      uint8_t status)
1021 {
1022 	uint8_t updated;
1023 	int rc = 0;
1024 
1025 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
1026 
1027 	updated = astlpc->kcs_status ^ status;
1028 
1029 	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
1030 		       status, updated);
1031 
1032 	if (updated & KCS_STATUS_BMC_READY) {
1033 		if (status & KCS_STATUS_BMC_READY) {
1034 			astlpc->kcs_status = status;
1035 			return astlpc->binding.start(&astlpc->binding);
1036 		} else {
1037 			mctp_binding_set_tx_enabled(&astlpc->binding, false);
1038 		}
1039 	}
1040 
1041 	if (astlpc->proto->version == 0 ||
1042 			updated & KCS_STATUS_CHANNEL_ACTIVE) {
1043 		bool enable;
1044 
1045 		rc = mctp_astlpc_finalise_channel(astlpc);
1046 		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;
1047 
1048 		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
1049 	}
1050 
1051 	astlpc->kcs_status = status;
1052 
1053 	return rc;
1054 }
1055 
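/*
 * Values exchanged via the KCS data register: 0x0 is the host's channel-init
 * request, 0x1 announces a packet written to the remote Rx buffer, 0x2
 * acknowledges that a transmitted packet has been consumed, and 0xff is the
 * dummy value written alongside status updates.
 */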
1056 int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
1057 {
1058 	uint8_t status, data;
1059 	int rc;
1060 
1061 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
1062 	if (rc) {
1063 		astlpc_prwarn(astlpc, "KCS read error");
1064 		return -1;
1065 	}
1066 
1067 	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);
1068 
1069 	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
1070 		return 0;
1071 
1072 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
1073 	if (rc) {
1074 		astlpc_prwarn(astlpc, "KCS data read error");
1075 		return -1;
1076 	}
1077 
1078 	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);
1079 
1080 	if (!astlpc->proto->version && !(data == 0x0 || data == 0xff)) {
1081 		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
1082 			      data);
1083 		return 0;
1084 	}
1085 
1086 	switch (data) {
1087 	case 0x0:
1088 		mctp_astlpc_init_channel(astlpc);
1089 		break;
1090 	case 0x1:
1091 		mctp_astlpc_rx_start(astlpc);
1092 		break;
1093 	case 0x2:
1094 		mctp_astlpc_tx_complete(astlpc);
1095 		break;
1096 	case 0xff:
1097 		/* No responsibilities for the BMC on 0xff */
1098 		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1099 			rc = mctp_astlpc_update_channel(astlpc, status);
1100 			if (rc < 0)
1101 				return rc;
1102 		}
1103 		break;
1104 	default:
1105 		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
1106 	}
1107 
1108 	/* Handle silent loss of bmc-ready */
1109 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1110 		if (!(status & KCS_STATUS_BMC_READY && data == 0xff))
1111 			return mctp_astlpc_update_channel(astlpc, status);
1112 	}
1113 
1114 	return rc;
1115 }
1116 
1117 /* allocate and perform basic initialisation */
1118 static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
1119 						      uint32_t mtu)
1120 {
1121 	struct mctp_binding_astlpc *astlpc;
1122 
1123 	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
1124 	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));
1125 
1126 	astlpc = __mctp_alloc(sizeof(*astlpc));
1127 	if (!astlpc)
1128 		return NULL;
1129 
1130 	memset(astlpc, 0, sizeof(*astlpc));
1131 	astlpc->mode = mode;
1132 	astlpc->lpc_map = NULL;
1133 	astlpc->requested_mtu = mtu;
1134 	astlpc->binding.name = "astlpc";
1135 	astlpc->binding.version = 1;
1136 	astlpc->binding.pkt_size =
1137 		MCTP_PACKET_SIZE(mtu > MCTP_BTU ? mtu : MCTP_BTU);
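	/*
	 * Reserve 4 bytes of head- and tail-room in each pktbuf for the
	 * medium-specific framing; v3's pktbuf_protect() pushes its CRC32
	 * into the tail-room.
	 */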
1138 	astlpc->binding.pkt_header = 4;
1139 	astlpc->binding.pkt_trailer = 4;
1140 	astlpc->binding.tx = mctp_binding_astlpc_tx;
1141 	if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1142 		astlpc->binding.start = mctp_binding_astlpc_start_bmc;
1143 	else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
1144 		astlpc->binding.start = mctp_binding_astlpc_start_host;
1145 	else {
1146 		astlpc_prerr(astlpc, "%s: Invalid mode: %d", __func__, mode);
1147 		__mctp_free(astlpc);
1148 		return NULL;
1149 	}
1150 
1151 	return astlpc;
1152 }
1153 
1154 struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
1155 {
1156 	return &b->binding;
1157 }
1158 
1159 struct mctp_binding_astlpc *
1160 mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
1161 		 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
1162 {
1163 	struct mctp_binding_astlpc *astlpc;
1164 
1165 	if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
1166 	      mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
1167 		mctp_prerr("Unknown binding mode: %u", mode);
1168 		return NULL;
1169 	}
1170 
1171 	astlpc = __mctp_astlpc_init(mode, mtu);
1172 	if (!astlpc)
1173 		return NULL;
1174 
1175 	memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
1176 	astlpc->ops_data = ops_data;
1177 	astlpc->lpc_map = lpc_map;
1178 	astlpc->mode = mode;
1179 
1180 	return astlpc;
1181 }
1182 
1183 struct mctp_binding_astlpc *
1184 mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
1185 		     void *lpc_map)
1186 {
1187 	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
1188 				ops, ops_data);
1189 }
1190 
1191 void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
1192 {
1193 	/* Clear channel-active and bmc-ready */
1194 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1195 		mctp_astlpc_kcs_set_status(astlpc, 0);
1196 	__mctp_free(astlpc);
1197 }
1198 
1199 #ifdef MCTP_HAVE_FILEIO
1200 
1201 static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
1202 {
1203 	struct aspeed_lpc_ctrl_mapping map = {
1204 		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
1205 		.window_id = 0, /* There's only one */
1206 		.flags = 0,
1207 		.addr = 0,
1208 		.offset = 0,
1209 		.size = 0
1210 	};
1211 	void *lpc_map_base;
1212 	int fd, rc;
1213 
1214 	fd = open(lpc_path, O_RDWR | O_SYNC);
1215 	if (fd < 0) {
1216 		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
1217 		return -1;
1218 	}
1219 
1220 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
1221 	if (rc) {
1222 		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
1223 		close(fd);
1224 		return -1;
1225 	}
1226 
1227 	/*
1228 	 * 🚨🚨🚨
1229 	 *
1230 	 * Decouple ourselves from hiomapd[1] (another user of the FW2AHB) by
1231 	 * mapping the FW2AHB to the reserved memory here as well.
1232 	 *
1233 	 * It's not possible to use the MCTP ASTLPC binding on machines that
1234 	 * need the FW2AHB bridge mapped anywhere except to the reserved memory
1235 	 * (e.g. the host SPI NOR).
1236 	 *
1237 	 * [1] https://github.com/openbmc/hiomapd/
1238 	 *
1239 	 * 🚨🚨🚨
1240 	 *
1241 	 * The following calculation must align with what's going on in
1242 	 * hiomapd's lpc.c so as not to disrupt its behaviour:
1243 	 *
1244 	 * https://github.com/openbmc/hiomapd/blob/5ff50e3cbd7702aefc185264e4adfb9952040575/lpc.c#L68
1245 	 *
1246 	 * 🚨🚨🚨
1247 	 */
1248 
1249 	/* Map the reserved memory at the top of the 28-bit LPC firmware address space */
1250 	map.addr = 0x0FFFFFFF & -map.size;
1251 	astlpc_prinfo(astlpc,
1252 		      "Configuring FW2AHB to map reserved memory at 0x%08x for 0x%x in the LPC FW cycle address-space",
1253 		      map.addr, map.size);
1254 
1255 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
1256 	if (rc) {
1257 		astlpc_prwarn(astlpc, "Failed to map FW2AHB to reserved memory");
1258 		close(fd);
1259 		return -1;
1260 	}
1261 
1262 	/* Map the reserved memory into our address space */
1263 	lpc_map_base =
1264 		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1265 	if (lpc_map_base == MAP_FAILED) {
1266 		astlpc_prwarn(astlpc, "LPC mmap failed");
1267 		rc = -1;
1268 	} else {
1269 		astlpc->lpc_map = lpc_map_base + map.size - LPC_WIN_SIZE;
1270 	}
1271 
1272 	close(fd);
1273 
1274 	return rc;
1275 }
1276 
1277 static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc)
1278 {
1279 	astlpc->kcs_fd = open(kcs_path, O_RDWR);
1280 	if (astlpc->kcs_fd < 0)
1281 		return -1;
1282 
1283 	return 0;
1284 }
1285 
1286 static int __mctp_astlpc_fileio_kcs_read(void *arg,
1287 		enum mctp_binding_astlpc_kcs_reg reg, uint8_t *val)
1288 {
1289 	struct mctp_binding_astlpc *astlpc = arg;
1290 	off_t offset = reg;
1291 	int rc;
1292 
1293 	rc = pread(astlpc->kcs_fd, val, 1, offset);
1294 
1295 	return rc == 1 ? 0 : -1;
1296 }
1297 
1298 static int __mctp_astlpc_fileio_kcs_write(void *arg,
1299 		enum mctp_binding_astlpc_kcs_reg reg, uint8_t val)
1300 {
1301 	struct mctp_binding_astlpc *astlpc = arg;
1302 	off_t offset = reg;
1303 	int rc;
1304 
1305 	rc = pwrite(astlpc->kcs_fd, &val, 1, offset);
1306 
1307 	return rc == 1 ? 0 : -1;
1308 }
1309 
1310 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc,
1311 			    struct pollfd *pollfd)
1312 {
1313 	pollfd->fd = astlpc->kcs_fd;
1314 	pollfd->events = POLLIN;
1315 
1316 	return 0;
1317 }
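
/*
 * Illustrative only: a minimal event loop for the file-IO backend, assuming
 * the core libmctp calls mctp_init() and mctp_register_bus() and with all
 * error handling omitted (the EID of 8 is an arbitrary example):
 *
 *	struct mctp_binding_astlpc *astlpc = mctp_astlpc_init_fileio();
 *	struct mctp *mctp = mctp_init();
 *	struct pollfd pollfd;
 *
 *	mctp_register_bus(mctp, mctp_binding_astlpc_core(astlpc), 8);
 *	mctp_astlpc_init_pollfd(astlpc, &pollfd);
 *	for (;;) {
 *		poll(&pollfd, 1, -1);
 *		if (pollfd.revents & POLLIN)
 *			mctp_astlpc_poll(astlpc);
 *	}
 */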
1318 
1319 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
1320 {
1321 	struct mctp_binding_astlpc *astlpc;
1322 	int rc;
1323 
1324 	/*
1325 	 * If we're doing file IO then we're very likely not running
1326 	 * freestanding, so let's assume that we're on the BMC side.
1327 	 *
1328 	 * Requesting an MTU of 0 requests the largest possible MTU, whatever
1329 	 * value that might take.
1330 	 */
1331 	astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0);
1332 	if (!astlpc)
1333 		return NULL;
1334 
1335 	/* Set internal operations for KCS. We use direct access to the LPC
1336 	 * map area */
1337 	astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
1338 	astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
1339 	astlpc->ops_data = astlpc;
1340 
1341 	rc = mctp_astlpc_init_fileio_lpc(astlpc);
1342 	if (rc) {
1343 		__mctp_free(astlpc);
1344 		return NULL;
1345 	}
1346 
1347 	rc = mctp_astlpc_init_fileio_kcs(astlpc);
1348 	if (rc) {
1349 		__mctp_free(astlpc);
1350 		return NULL;
1351 	}
1352 
1353 	return astlpc;
1354 }
1355 #else
1356 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
1357 {
1358 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1359 	return NULL;
1360 }
1361 
1362 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc __unused,
1363 			    struct pollfd *pollfd __unused)
1364 {
1365 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1366 	return -1;
1367 }
1368 #endif
1369