/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#if HAVE_ENDIAN_H
#include <endian.h>
#endif

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define pr_fmt(x) "astlpc: " x

#include "libmctp.h"
#include "libmctp-alloc.h"
#include "libmctp-log.h"
#include "libmctp-astlpc.h"
#include "container_of.h"
#include "range.h"

#ifdef MCTP_HAVE_FILEIO

#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/aspeed-lpc-ctrl.h>

/* kernel interface */
static const char *kcs_path = "/dev/mctp0";
static const char *lpc_path = "/dev/aspeed-lpc-ctrl";

#endif

struct mctp_astlpc_buffer {
	uint32_t offset;
	uint32_t size;
};

struct mctp_astlpc_layout {
	struct mctp_astlpc_buffer rx;
	struct mctp_astlpc_buffer tx;
};

struct mctp_binding_astlpc {
	struct mctp_binding	binding;

	void *lpc_map;
	struct mctp_astlpc_layout layout;

	uint8_t mode;
	uint16_t version;
	uint32_t requested_mtu;

	/* direct ops data */
	struct mctp_binding_astlpc_ops ops;
	void *ops_data;

	/* fileio ops data */
	int kcs_fd;
	uint8_t kcs_status;

	bool			running;
};

#define binding_to_astlpc(b) \
	container_of(b, struct mctp_binding_astlpc, binding)

#define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
	do {                                                                   \
		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
			   ##__VA_ARGS__);                                     \
	} while (0)

#define astlpc_prerr(ctx, fmt, ...)                                            \
	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
#define astlpc_prwarn(ctx, fmt, ...)                                           \
	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
#define astlpc_prinfo(ctx, fmt, ...)                                           \
	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
#define astlpc_prdebug(ctx, fmt, ...)                                          \
	astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)

/* clang-format off */
#define ASTLPC_MCTP_MAGIC	0x4d435450
#define ASTLPC_VER_BAD	0
#define ASTLPC_VER_MIN	1

/* Support testing of new binding protocols */
#ifndef ASTLPC_VER_CUR
#define ASTLPC_VER_CUR	2
#endif

#define ASTLPC_PACKET_SIZE(sz)	(4 + (sz))
#define ASTLPC_BODY_SIZE(sz)	((sz) - 4)
/* clang-format on */
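
/*
 * Packets in the shared window are framed with a 4-byte, big-endian length
 * field ahead of the MCTP packet data (see mctp_binding_astlpc_tx() and
 * mctp_astlpc_rx_start()). ASTLPC_PACKET_SIZE() and ASTLPC_BODY_SIZE() above
 * convert between an MCTP packet size and the space that packet occupies in
 * a window buffer.
 */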

struct mctp_lpcmap_hdr {
	uint32_t magic;

	uint16_t bmc_ver_min;
	uint16_t bmc_ver_cur;
	uint16_t host_ver_min;
	uint16_t host_ver_cur;
	uint16_t negotiated_ver;
	uint16_t pad0;

	struct {
		uint32_t rx_offset;
		uint32_t rx_size;
		uint32_t tx_offset;
		uint32_t tx_size;
	} layout;
} __attribute__((packed));

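/*
 * The window starts with a control region: the mctp_lpcmap_hdr above, padded
 * out to control_size bytes. All header fields are stored big-endian, and the
 * rx/tx descriptors are named from the host's point of view; the BMC swaps
 * them when reading or writing the layout. Packet buffers must be placed at
 * or above control_size (enforced in mctp_astlpc_buffer_validate()).
 */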
static const uint32_t control_size = 0x100;

#define LPC_WIN_SIZE                (1 * 1024 * 1024)

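/*
 * KCS status register bits. BMC_READY and CHANNEL_ACTIVE live in the
 * software-defined portion of the status byte and are set by the BMC; IBF
 * and OBF are the hardware "input/output buffer full" flags indicating
 * pending data in the host-to-BMC and BMC-to-host data registers
 * respectively.
 */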
#define KCS_STATUS_BMC_READY		0x80
#define KCS_STATUS_CHANNEL_ACTIVE	0x40
#define KCS_STATUS_IBF			0x02
#define KCS_STATUS_OBF			0x01

static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
					enum mctp_binding_astlpc_kcs_reg reg,
					uint8_t val)
{
	return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
}

static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
				       enum mctp_binding_astlpc_kcs_reg reg,
				       uint8_t *val)
{
	return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
}

static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
					const void *buf, long offset,
					size_t len)
{
	astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);

	assert(offset >= 0);

	/* Indirect access */
	if (astlpc->ops.lpc_write) {
		void *data = astlpc->ops_data;

		return astlpc->ops.lpc_write(data, buf, offset, len);
	}

	/* Direct mapping */
	assert(astlpc->lpc_map);
	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);

	return 0;
}

static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
				       void *buf, long offset, size_t len)
{
	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
		       offset);

	assert(offset >= 0);

	/* Indirect access */
	if (astlpc->ops.lpc_read) {
		void *data = astlpc->ops_data;

		return astlpc->ops.lpc_read(data, buf, offset, len);
	}

	/* Direct mapping */
	assert(astlpc->lpc_map);
	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);

	return 0;
}

static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
				      uint8_t status)
{
	uint8_t data;
	int rc;

	/* Since we're setting the status register, we want the other endpoint
	 * to be interrupted. However, some hardware may only raise a host-side
	 * interrupt on an ODR event.
	 * So, write a dummy value of 0xff to ODR, which will ensure that an
	 * interrupt is triggered, and can be ignored by the host.
	 */
	data = 0xff;

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS status write failed");
		return -1;
	}

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS dummy data write failed");
		return -1;
	}

	return 0;
}

static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
				   struct mctp_astlpc_layout *layout)
{
	struct mctp_lpcmap_hdr hdr;
	int rc;

	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
	if (rc < 0)
		return rc;

	/* Flip the buffers as the names are defined in terms of the host */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
		layout->rx.offset = be32toh(hdr.layout.tx_offset);
		layout->rx.size = be32toh(hdr.layout.tx_size);
		layout->tx.offset = be32toh(hdr.layout.rx_offset);
		layout->tx.size = be32toh(hdr.layout.rx_size);
	} else {
		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

		layout->rx.offset = be32toh(hdr.layout.rx_offset);
		layout->rx.size = be32toh(hdr.layout.rx_size);
		layout->tx.offset = be32toh(hdr.layout.tx_offset);
		layout->tx.size = be32toh(hdr.layout.tx_size);
	}

	return 0;
}

static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
				    struct mctp_astlpc_layout *layout)
{
	uint32_t rx_size_be;

	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
		struct mctp_lpcmap_hdr hdr;

		/*
		 * Flip the buffers as the names are defined in terms of the
		 * host
		 */
		hdr.layout.rx_offset = htobe32(layout->tx.offset);
		hdr.layout.rx_size = htobe32(layout->tx.size);
		hdr.layout.tx_offset = htobe32(layout->rx.offset);
		hdr.layout.tx_size = htobe32(layout->rx.size);

		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
				offsetof(struct mctp_lpcmap_hdr, layout),
				sizeof(hdr.layout));
	}

	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

	/*
	 * As of v2 we only need to write rx_size - the offsets are controlled
	 * by the BMC, as is the BMC's rx_size (host tx_size).
	 */
	rx_size_be = htobe32(layout->rx.size);
	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
			offsetof(struct mctp_lpcmap_hdr, layout.rx_size),
			sizeof(rx_size_be));
}

static bool mctp_astlpc_buffer_validate(struct mctp_astlpc_buffer *buf,
					const char *name)
{
	/* Check for overflow */
	if (buf->offset + buf->size < buf->offset) {
		mctp_prerr(
			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
			", size: %" PRIu32,
			name, buf->offset, buf->size);
		return false;
	}

	/* Check that the buffers are contained within the allocated space */
	if (buf->offset + buf->size > LPC_WIN_SIZE) {
		mctp_prerr(
			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
			", size: %" PRIu32,
			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
			buf->size);
		return false;
	}

	/* Check that the baseline transmission unit is supported */
	if (buf->size < ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(MCTP_BTU))) {
		mctp_prerr(
			"%s packet buffer too small: Require %zu bytes to support the %u byte baseline transmission unit, found %" PRIu32,
			name, ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(MCTP_BTU)),
			MCTP_BTU, buf->size);
		return false;
	}

	/* Check for overlap with the control space */
	if (buf->offset < control_size) {
		mctp_prerr(
			"%s packet buffer overlaps control region {0x%" PRIx32
			", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
			name, 0U, control_size, buf->offset, buf->size);
		return false;
	}

	return true;
}

static bool mctp_astlpc_layout_validate(struct mctp_astlpc_layout *layout)
{
	struct mctp_astlpc_buffer *rx = &layout->rx;
	struct mctp_astlpc_buffer *tx = &layout->tx;
	bool rx_valid, tx_valid;

	rx_valid = mctp_astlpc_buffer_validate(rx, "Rx");
	tx_valid = mctp_astlpc_buffer_validate(tx, "Tx");

	if (!(rx_valid && tx_valid))
		return false;

	/* Check that the buffers are disjoint */
	if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
	    (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
		mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
			   ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
			   rx->offset, rx->size, tx->offset, tx->size);
		return false;
	}

	return true;
}

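/*
 * BMC-side initialisation: carve the window up as
 * [ control region | host Rx buffer | host Tx buffer ], giving each packet
 * buffer half of the space left after the control region (subject to the
 * MTU trimming and capping below), then publish the header and raise
 * KCS_STATUS_BMC_READY.
 */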
static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_lpcmap_hdr hdr = { 0 };
	uint8_t status;
	size_t sz;

	/*
	 * The largest buffer size is half of the allocated MCTP space
	 * excluding the control space.
	 */
	sz = ((LPC_WIN_SIZE - control_size) / 2);

	/*
	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
	 * Query Hop in DSP0236 v1.3.0.
	 */
	sz = MCTP_BODY_SIZE(ASTLPC_BODY_SIZE(sz));
	sz &= ~0xfUL;
	sz = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(sz));

	if (astlpc->requested_mtu) {
		size_t r;

		r = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(astlpc->requested_mtu));
		sz = MIN(sz, r);
	}

	/* Flip the buffers as the names are defined in terms of the host */
	astlpc->layout.tx.offset = control_size;
	astlpc->layout.tx.size = sz;
	astlpc->layout.rx.offset =
		astlpc->layout.tx.offset + astlpc->layout.tx.size;
	astlpc->layout.rx.size = sz;

	if (!mctp_astlpc_layout_validate(&astlpc->layout)) {
		astlpc_prerr(astlpc, "Cannot support an MTU of %zu", sz);
		return -EINVAL;
	}

	hdr = (struct mctp_lpcmap_hdr){
		.magic = htobe32(ASTLPC_MCTP_MAGIC),
		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),

		/* Flip the buffers back as we're now describing the host's
		 * configuration to the host */
		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
		.layout.rx_size = htobe32(astlpc->layout.tx.size),
		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
		.layout.tx_size = htobe32(astlpc->layout.rx.size),
	};

	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));

	/*
	 * Set status indicating that the BMC is now active. Be explicit about
	 * clearing OBF; we're reinitialising the binding and so any previous
	 * buffer state is irrelevant.
	 */
	status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF;
	return mctp_astlpc_kcs_set_status(astlpc, status);
}

static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
{
	struct mctp_binding_astlpc *astlpc =
		container_of(b, struct mctp_binding_astlpc, binding);

	return mctp_astlpc_init_bmc(astlpc);
}

static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
					 uint16_t bmc_ver_cur,
					 uint16_t host_ver_min,
					 uint16_t host_ver_cur)
{
	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
			   "], [%" PRIu16 ", %" PRIu16 "]",
			   bmc_ver_min, bmc_ver_cur, host_ver_min,
			   host_ver_cur);
		return false;
	} else if (bmc_ver_min > bmc_ver_cur) {
		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
			   "]",
			   bmc_ver_min, bmc_ver_cur);
		return false;
	} else if (host_ver_min > host_ver_cur) {
		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
			   "]",
			   host_ver_min, host_ver_cur);
		return false;
	} else if ((host_ver_cur < bmc_ver_min) ||
		   (host_ver_min > bmc_ver_cur)) {
		mctp_prerr(
			"Unable to satisfy version negotiation with ranges [%" PRIu16
			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
		return false;
	}

	return true;
}

static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout layout;
	uint32_t sz;
	int rc;

	rc = mctp_astlpc_layout_read(astlpc, &layout);
	if (rc < 0)
		return rc;

	if (!mctp_astlpc_layout_validate(&layout)) {
		astlpc_prerr(
			astlpc,
			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
			layout.rx.offset, layout.rx.size, layout.tx.offset,
			layout.tx.size);
		return -EINVAL;
	}

	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
		      astlpc->requested_mtu);

	sz = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(astlpc->requested_mtu));
	layout.rx.size = sz;

	if (!mctp_astlpc_layout_validate(&layout)) {
		astlpc_prerr(
			astlpc,
			"Generated invalid buffer layout with size %" PRIu32
			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
			", %" PRIu32 "}",
			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
			layout.tx.size);
		return -EINVAL;
	}

	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
		      astlpc->requested_mtu);

	return mctp_astlpc_layout_write(astlpc, &layout);
}

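/*
 * Host-side initialisation: require KCS_STATUS_BMC_READY, read the BMC's
 * advertised version range from the control region, propose an Rx buffer
 * size if both ends support v2, publish the host's version range, then send
 * the 0x00 "channel init" command over KCS to trigger negotiation on the
 * BMC.
 */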
static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
{
	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
	uint16_t bmc_ver_min, bmc_ver_cur;
	struct mctp_lpcmap_hdr hdr;
	uint8_t status;
	int rc;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
	if (rc) {
		mctp_prwarn("KCS status read failed");
		return rc;
	}

	astlpc->kcs_status = status;

	if (!(status & KCS_STATUS_BMC_READY))
		return -EHOSTDOWN;

	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));

	bmc_ver_min = be16toh(hdr.bmc_ver_min);
	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);

	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
					  ASTLPC_VER_MIN, ASTLPC_VER_CUR)) {
		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
		return -EINVAL;
	}

	/*
	 * Negotiation always chooses the highest protocol version that
	 * satisfies the version constraints. So check whether the BMC supports
	 * v2, and if so, negotiate in v2 style.
	 */
	if (ASTLPC_VER_CUR >= 2 && bmc_ver_cur >= 2) {
		rc = mctp_astlpc_negotiate_layout_host(astlpc);
		if (rc < 0)
			return rc;
	}

	/* Version negotiation */
	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
			      sizeof(ver_min_be));

	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
			      sizeof(ver_cur_be));

	/* Send channel init command */
	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS write failed");
	}

	return rc;
}

static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
{
	struct mctp_binding_astlpc *astlpc =
		container_of(b, struct mctp_binding_astlpc, binding);

	return mctp_astlpc_init_host(astlpc);
}

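/*
 * Readiness depends on which data-register flag applies to us. From the
 * BMC's perspective OBF covers data it has written for the host and IBF
 * covers data the host has written for it; the host sees the mirror image.
 * Hence the (is_bmc ^ is_write) selection below: we may write when the
 * relevant "buffer full" flag is clear, and may read when it is set.
 */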
static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
				    uint8_t status, bool is_write)
{
	bool is_bmc;
	bool ready_state;
	uint8_t flag;

	is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
	flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
	ready_state = is_write ? 0 : 1;

	return !!(status & flag) == ready_state;
}

static inline bool
mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
{
	return __mctp_astlpc_kcs_ready(astlpc, status, false);
}

static inline bool
mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
{
	return __mctp_astlpc_kcs_ready(astlpc, status, true);
}

static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
		uint8_t data)
{
	uint8_t status;
	int rc;

	for (;;) {
		rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS,
					  &status);
		if (rc) {
			astlpc_prwarn(astlpc, "KCS status read failed");
			return -1;
		}
		if (mctp_astlpc_kcs_write_ready(astlpc, status))
			break;
		/* todo: timeout */
	}

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS data write failed");
		return -1;
	}

	return 0;
}

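/*
 * Transmit path: the packet (MCTP header included) is copied into our Tx
 * buffer behind a 4-byte big-endian length field, Tx is disabled until the
 * remote side acknowledges with a 0x02 "Tx complete" command, and a 0x01
 * command is sent over KCS to prompt the remote side to read the buffer.
 */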
static int mctp_binding_astlpc_tx(struct mctp_binding *b,
		struct mctp_pktbuf *pkt)
{
	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
	uint32_t len, len_be;
	struct mctp_hdr *hdr;

	hdr = mctp_pktbuf_hdr(pkt);
	len = mctp_pktbuf_size(pkt);

	astlpc_prdebug(astlpc,
		       "%s: Transmitting %" PRIu32
		       "-byte packet (%hhu, %hhu, 0x%hhx)",
		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);

	if (len > ASTLPC_BODY_SIZE(astlpc->layout.tx.size)) {
		astlpc_prwarn(astlpc, "invalid TX len 0x%x", len);
		return -1;
	}

	len_be = htobe32(len);
	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
			      sizeof(len_be));
	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);

	mctp_binding_set_tx_enabled(b, false);

	mctp_astlpc_kcs_send(astlpc, 0x1);
	return 0;
}

static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
					      uint16_t bmc_ver_cur,
					      uint16_t host_ver_min,
					      uint16_t host_ver_cur)
{
	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
					  host_ver_min, host_ver_cur))
		return ASTLPC_VER_BAD;

	if (bmc_ver_cur < host_ver_cur)
		return bmc_ver_cur;

	return host_ver_cur;
}

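/*
 * The gap between the two buffer offsets is how much room the lower buffer
 * has before it runs into the upper one, so it caps the largest symmetric
 * packet size the BMC can support. The requested MTU (if any) and the host's
 * proposed Rx size then shrink that cap further; the result is applied to
 * both directions by convention.
 */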
static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
					  struct mctp_astlpc_layout *layout)
{
	uint32_t low, high, limit;

	/* Derive the largest MTU the BMC _can_ support */
	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
	limit = high - low;

	/* Determine the largest MTU the BMC _wants_ to support */
	if (astlpc->requested_mtu) {
		uint32_t req = astlpc->requested_mtu;

		limit = MIN(limit, ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(req)));
	}

	/* Determine the accepted MTU, applied both directions by convention */
	return MCTP_BODY_SIZE(ASTLPC_BODY_SIZE(MIN(limit, layout->tx.size)));
}

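/*
 * BMC-side layout negotiation, run during channel init: read back the layout
 * the host wrote (which carries the host's desired Rx size), derive a common
 * MTU, apply it symmetrically to both buffers, and publish the result for
 * the host to pick up in mctp_astlpc_finalise_channel().
 */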
static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc,
					    uint16_t version)
{
	struct mctp_astlpc_layout proposed, pending;
	uint32_t sz, mtu;
	int rc;

	/* Extract the host's proposed layout */
	rc = mctp_astlpc_layout_read(astlpc, &proposed);
	if (rc < 0)
		return rc;

	if (!mctp_astlpc_layout_validate(&proposed))
		return -EINVAL;

	/* Negotiate the MTU */
	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
	sz = ASTLPC_PACKET_SIZE(MCTP_PACKET_SIZE(mtu));

	/*
	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
	 * functions
	 */
	pending = astlpc->layout;
	pending.tx.size = sz;
	pending.rx.size = sz;

	if (mctp_astlpc_layout_validate(&pending)) {
		/* We found a sensible Rx MTU, so honour it */
		astlpc->layout = pending;

		/* Enforce the negotiated MTU */
		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
		if (rc < 0)
			return rc;

		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
			      mtu);
	} else {
		astlpc_prwarn(astlpc, "MTU negotiation failed");
		return -EINVAL;
	}

	if (version >= 2)
		astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);

	return 0;
}

static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
{
	uint16_t negotiated, negotiated_be;
	struct mctp_lpcmap_hdr hdr;
	uint8_t status;
	int rc;

	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));

	/* Version negotiation */
	negotiated =
		mctp_astlpc_negotiate_version(ASTLPC_VER_MIN, ASTLPC_VER_CUR,
					      be16toh(hdr.host_ver_min),
					      be16toh(hdr.host_ver_cur));

	/* Host Rx MTU negotiation: Failure terminates channel init */
	rc = mctp_astlpc_negotiate_layout_bmc(astlpc, negotiated);
	if (rc < 0)
		negotiated = ASTLPC_VER_BAD;

	/* Populate the negotiated version */
	astlpc->version = negotiated;
	negotiated_be = htobe16(negotiated);
	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
			      sizeof(negotiated_be));

	/* Finalise the configuration */
	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
	if (negotiated > 0) {
		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
			      negotiated);
		status |= KCS_STATUS_CHANNEL_ACTIVE;
	} else {
		astlpc_prerr(astlpc, "Failed to initialise channel\n");
	}

	mctp_astlpc_kcs_set_status(astlpc, status);

	mctp_binding_set_tx_enabled(&astlpc->binding,
				    status & KCS_STATUS_CHANNEL_ACTIVE);
}

static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_pktbuf *pkt;
	uint32_t len;

	mctp_astlpc_lpc_read(astlpc, &len, astlpc->layout.rx.offset,
			     sizeof(len));
	len = be32toh(len);

	if (len > ASTLPC_BODY_SIZE(astlpc->layout.rx.size)) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", len);
		return;
	}

	assert(astlpc->binding.pkt_size >= 0);
	if (len > (uint32_t)astlpc->binding.pkt_size) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", len);
		return;
	}

	pkt = mctp_pktbuf_alloc(&astlpc->binding, len);
	if (!pkt)
		goto out_complete;

	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
			     astlpc->layout.rx.offset + 4, len);

	mctp_bus_rx(&astlpc->binding, pkt);

out_complete:
	mctp_astlpc_kcs_send(astlpc, 0x2);
}

static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
{
	mctp_binding_set_tx_enabled(&astlpc->binding, true);
}

static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout layout;
	uint16_t negotiated;
	int rc;

	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
				  offsetof(struct mctp_lpcmap_hdr,
					   negotiated_ver),
				  sizeof(negotiated));
	if (rc < 0)
		return rc;

	negotiated = be16toh(negotiated);

	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
	    negotiated > ASTLPC_VER_CUR) {
		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
			     negotiated);
		return -EINVAL;
	}

	astlpc->version = negotiated;

	rc = mctp_astlpc_layout_read(astlpc, &layout);
	if (rc < 0)
		return rc;

	if (!mctp_astlpc_layout_validate(&layout)) {
		mctp_prerr("BMC proposed invalid buffer parameters");
		return -EINVAL;
	}

	astlpc->layout = layout;

	if (negotiated >= 2)
		astlpc->binding.pkt_size =
			ASTLPC_BODY_SIZE(astlpc->layout.tx.size);

	return 0;
}

static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
				      uint8_t status)
{
	uint8_t updated;
	int rc = 0;

	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

	updated = astlpc->kcs_status ^ status;

	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
		       status, updated);

	if (updated & KCS_STATUS_BMC_READY) {
		if (status & KCS_STATUS_BMC_READY) {
			astlpc->kcs_status = status;
			return astlpc->binding.start(&astlpc->binding);
		} else {
			mctp_binding_set_tx_enabled(&astlpc->binding, false);
		}
	}

	if (astlpc->version == 0 || updated & KCS_STATUS_CHANNEL_ACTIVE) {
		bool enable;

		rc = mctp_astlpc_finalise_channel(astlpc);
		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;

		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
	}

	astlpc->kcs_status = status;

	return rc;
}

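/*
 * Poll the KCS interface for a pending command byte. The data register
 * carries one of four commands:
 *   0x00 - channel init request from the host
 *   0x01 - a packet is ready in the sender's Tx buffer
 *   0x02 - the receiver is done with our Tx buffer (Tx complete)
 *   0xff - dummy write accompanying a status register update
 *          (see mctp_astlpc_kcs_set_status())
 */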
int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
{
	uint8_t status, data;
	int rc;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);

	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
		return 0;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS data read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);

	if (!astlpc->version && !(data == 0x0 || data == 0xff)) {
		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
			      data);
		return 0;
	}

	switch (data) {
	case 0x0:
		mctp_astlpc_init_channel(astlpc);
		break;
	case 0x1:
		mctp_astlpc_rx_start(astlpc);
		break;
	case 0x2:
		mctp_astlpc_tx_complete(astlpc);
		break;
	case 0xff:
		/* No responsibilities for the BMC on 0xff */
		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
			rc = mctp_astlpc_update_channel(astlpc, status);
			if (rc < 0)
				return rc;
		}
		break;
	default:
		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
	}

	/* Handle silent loss of bmc-ready */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
		if (!(status & KCS_STATUS_BMC_READY && data == 0xff))
			return mctp_astlpc_update_channel(astlpc, status);
	}

	return rc;
}

/* allocate and basic initialisation */
static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
						      uint32_t mtu)
{
	struct mctp_binding_astlpc *astlpc;

	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));

	astlpc = __mctp_alloc(sizeof(*astlpc));
	if (!astlpc)
		return NULL;

	memset(astlpc, 0, sizeof(*astlpc));
	astlpc->mode = mode;
	astlpc->lpc_map = NULL;
	astlpc->requested_mtu = mtu;
	astlpc->binding.name = "astlpc";
	astlpc->binding.version = 1;
	astlpc->binding.pkt_size =
		MCTP_PACKET_SIZE(mtu > MCTP_BTU ? mtu : MCTP_BTU);
	astlpc->binding.pkt_pad = 0;
	astlpc->binding.tx = mctp_binding_astlpc_tx;
	if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
		astlpc->binding.start = mctp_binding_astlpc_start_bmc;
	else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
		astlpc->binding.start = mctp_binding_astlpc_start_host;
	else {
		astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode);
		__mctp_free(astlpc);
		return NULL;
	}

	return astlpc;
}

struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
{
	return &b->binding;
}

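/*
 * Typical use with caller-supplied accessors (illustrative sketch only:
 * error handling is omitted, my_kcs_read()/my_kcs_write()/my_lpc_read()/
 * my_lpc_write()/my_ctx are caller-provided placeholders, and the endpoint
 * ID of 8 is arbitrary):
 *
 *	static const struct mctp_binding_astlpc_ops ops = {
 *		.kcs_read = my_kcs_read,
 *		.kcs_write = my_kcs_write,
 *		.lpc_read = my_lpc_read,
 *		.lpc_write = my_lpc_write,
 *	};
 *
 *	struct mctp *mctp = mctp_init();
 *	struct mctp_binding_astlpc *astlpc = mctp_astlpc_init(
 *		MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, NULL, &ops, my_ctx);
 *	mctp_register_bus(mctp, mctp_binding_astlpc_core(astlpc), 8);
 *
 *	for (;;)
 *		mctp_astlpc_poll(astlpc); // on KCS interrupt or readiness
 */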
struct mctp_binding_astlpc *
mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
		 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
{
	struct mctp_binding_astlpc *astlpc;

	if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
	      mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
		mctp_prerr("Unknown binding mode: %u", mode);
		return NULL;
	}

	astlpc = __mctp_astlpc_init(mode, mtu);
	if (!astlpc)
		return NULL;

	memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
	astlpc->ops_data = ops_data;
	astlpc->lpc_map = lpc_map;
	astlpc->mode = mode;

	return astlpc;
}

struct mctp_binding_astlpc *
mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
		     void *lpc_map)
{
	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
				ops, ops_data);
}

void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
{
	/* Clear channel-active and bmc-ready */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
		mctp_astlpc_kcs_set_status(astlpc, KCS_STATUS_OBF);
	__mctp_free(astlpc);
}

#ifdef MCTP_HAVE_FILEIO

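/*
 * Map the reserved-memory window exposed by the ASPEED LPC control driver;
 * the binding uses the top LPC_WIN_SIZE bytes of that window as its shared
 * MCTP region, hence the base + size - LPC_WIN_SIZE adjustment below.
 */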
static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
{
	struct aspeed_lpc_ctrl_mapping map = {
		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
		.window_id = 0, /* There's only one */
		.flags = 0,
		.addr = 0,
		.offset = 0,
		.size = 0
	};
	void *lpc_map_base;
	int fd, rc;

	fd = open(lpc_path, O_RDWR | O_SYNC);
	if (fd < 0) {
		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
		return -1;
	}

	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
	if (rc) {
		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
		close(fd);
		return -1;
	}

	lpc_map_base =
		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (lpc_map_base == MAP_FAILED) {
		astlpc_prwarn(astlpc, "LPC mmap failed");
		rc = -1;
	} else {
		astlpc->lpc_map = lpc_map_base + map.size - LPC_WIN_SIZE;
	}

	close(fd);

	return rc;
}

static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc)
{
	astlpc->kcs_fd = open(kcs_path, O_RDWR);
	if (astlpc->kcs_fd < 0)
		return -1;

	return 0;
}

static int __mctp_astlpc_fileio_kcs_read(void *arg,
		enum mctp_binding_astlpc_kcs_reg reg, uint8_t *val)
{
	struct mctp_binding_astlpc *astlpc = arg;
	off_t offset = reg;
	int rc;

	rc = pread(astlpc->kcs_fd, val, 1, offset);

	return rc == 1 ? 0 : -1;
}

static int __mctp_astlpc_fileio_kcs_write(void *arg,
		enum mctp_binding_astlpc_kcs_reg reg, uint8_t val)
{
	struct mctp_binding_astlpc *astlpc = arg;
	off_t offset = reg;
	int rc;

	rc = pwrite(astlpc->kcs_fd, &val, 1, offset);

	return rc == 1 ? 0 : -1;
}

int mctp_astlpc_get_fd(struct mctp_binding_astlpc *astlpc)
{
	return astlpc->kcs_fd;
}

struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
{
	struct mctp_binding_astlpc *astlpc;
	int rc;

	/*
	 * If we're doing file IO then we're very likely not running
	 * freestanding, so let's assume that we're on the BMC side.
	 *
	 * Requesting an MTU of 0 requests the largest possible MTU, whatever
	 * value that might take.
	 */
	astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0);
	if (!astlpc)
		return NULL;

	/* Set internal operations for KCS. We use direct access to the LPC
	 * map area */
	astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
	astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
	astlpc->ops_data = astlpc;

	rc = mctp_astlpc_init_fileio_lpc(astlpc);
	if (rc) {
		__mctp_free(astlpc);
		return NULL;
	}

	rc = mctp_astlpc_init_fileio_kcs(astlpc);
	if (rc) {
		__mctp_free(astlpc);
		return NULL;
	}

	return astlpc;
}
#else
struct mctp_binding_astlpc *mctp_astlpc_init_fileio(void)
{
	mctp_prerr("Missing support for file IO");
	return NULL;
}

int mctp_astlpc_get_fd(
		struct mctp_binding_astlpc *astlpc __attribute__((unused)))
{
	mctp_prerr("Missing support for file IO");
	return -1;
}
#endif
1166