xref: /openbmc/libmctp/astlpc.c (revision 4a09e1dc48831f20e15b5fe76bf3011eaf587dd9)
1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */
2 
3 #if HAVE_CONFIG_H
4 #include "config.h"
5 #endif
6 
7 #if HAVE_ENDIAN_H
8 #include <endian.h>
9 #endif
10 
11 #include <assert.h>
12 #include <err.h>
13 #include <errno.h>
14 #include <inttypes.h>
15 #include <stdbool.h>
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #define pr_fmt(x) "astlpc: " x
20 
21 #include "container_of.h"
22 #include "crc32.h"
23 #include "libmctp.h"
24 #include "libmctp-alloc.h"
25 #include "libmctp-log.h"
26 #include "libmctp-astlpc.h"
27 #include "range.h"
28 
29 #ifdef MCTP_HAVE_FILEIO
30 
31 #include <unistd.h>
32 #include <fcntl.h>
33 #include <poll.h>
34 #include <sys/ioctl.h>
35 #include <sys/mman.h>
36 #include <linux/aspeed-lpc-ctrl.h>
37 
38 /* kernel interface */
39 static const char *lpc_path = "/dev/aspeed-lpc-ctrl";
40 
41 #endif
42 
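/*
 * Single-byte control commands exchanged through the KCS data register. The
 * dummy value 0xff is written alongside status updates purely to raise an
 * interrupt; see mctp_astlpc_kcs_set_status().
 */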
43 enum mctp_astlpc_cmd {
44 	cmd_initialise = 0x00,
45 	cmd_tx_begin = 0x01,
46 	cmd_rx_complete = 0x02,
47 	cmd_dummy_value = 0xff,
48 };
49 
50 enum mctp_astlpc_buffer_state {
51 	/*
52 	 * Prior to "Channel Ready" we mark the buffers as "idle" to catch illegal accesses. In this
53 	 * state neither side is considered the owner of the buffer.
54 	 *
55 	 * Upon "Channel Ready", each side transitions the buffers from the initial "idle" state
56 	 * to the following target states:
57 	 *
58 	 * Tx buffer: "acquired"
59 	 * Rx buffer: "released"
60 	 */
61 	buffer_state_idle,
62 
63 	/*
64 	 * Beyond initialisation by "Channel Ready", buffers are in the "acquired" state once:
65 	 *
66 	 * 1. We dequeue from the KCS interface a control command transferring the buffer to our
67 	 *    ownership, and
68 	 * 2. We are yet to complete all of our required accesses to the buffer
69 	 *
70 	 * * The Tx buffer enters the "acquired" state when we dequeue the "Rx Complete" command
71 	 * * The Rx buffer enters the "acquired" state when we dequeue the "Tx Begin" command
72 	 *
73 	 * It is a failure of implementation if it's possible for both sides to simultaneously
74 	 * consider a buffer as "acquired".
75 	 */
76 	buffer_state_acquired,
77 
78 	/*
79 	 * Buffers are in the "prepared" state when:
80 	 *
81 	 * 1. We have completed all of our required accesses (read or write) for the buffer, and
82 	 * 2. We have not yet successfully enqueued the control command to hand off ownership
83 	 */
84 	buffer_state_prepared,
85 
86 	/*
87 	 * Beyond initialisation by "Channel Ready", buffers are in the "released" state once:
88 	 *
89 	 * 1. We successfully enqueue the control command transferring ownership to the remote
90 	 *    side into the KCS interface
91 	 *
92 	 * * The Tx buffer enters the "released" state when we enqueue the "Tx Begin" command
93 	 * * The Rx buffer enters the "released" state when we enqueue the "Rx Complete" command
94 	 *
95 	 * It may be the case that both sides simultaneously consider a buffer to be in the
96 	 * "released" state. However, if this is true, it must also be true that a buffer ownership
97 	 * transfer command has been enqueued in the KCS interface and is yet to be dequeued.
98 	 */
99 	buffer_state_released,
100 };
101 
102 struct mctp_astlpc_buffer {
103 	uint32_t offset;
104 	uint32_t size;
105 	enum mctp_astlpc_buffer_state state;
106 };
107 
108 struct mctp_astlpc_layout {
109 	struct mctp_astlpc_buffer rx;
110 	struct mctp_astlpc_buffer tx;
111 };
112 
113 struct mctp_astlpc_protocol {
114 	uint16_t version;
115 	uint32_t (*packet_size)(uint32_t body);
116 	uint32_t (*body_size)(uint32_t packet);
117 	void (*pktbuf_protect)(struct mctp_pktbuf *pkt);
118 	bool (*pktbuf_validate)(struct mctp_pktbuf *pkt);
119 };
120 
121 struct mctp_binding_astlpc {
122 	struct mctp_binding binding;
123 
124 	void *lpc_map;
125 	struct mctp_astlpc_layout layout;
126 
127 	uint8_t mode;
128 	uint32_t requested_mtu;
129 
130 	const struct mctp_astlpc_protocol *proto;
131 
132 	/* direct ops data */
133 	struct mctp_binding_astlpc_ops ops;
134 	void *ops_data;
135 
136 	/* fileio ops data */
137 	int kcs_fd;
138 	uint8_t kcs_status;
139 };
140 
141 #define binding_to_astlpc(b)                                                   \
142 	container_of(b, struct mctp_binding_astlpc, binding)
143 
144 #define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
145 	do {                                                                   \
146 		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
147 		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
148 			   ##__VA_ARGS__);                                     \
149 	} while (0)
150 
151 #define astlpc_prerr(ctx, fmt, ...)                                            \
152 	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
153 #define astlpc_prwarn(ctx, fmt, ...)                                           \
154 	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
155 #define astlpc_prnotice(ctx, fmt, ...)                                         \
156 	astlpc_prlog(ctx, MCTP_LOG_NOTICE, fmt, ##__VA_ARGS__)
157 #define astlpc_prinfo(ctx, fmt, ...)                                           \
158 	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
159 #define astlpc_prdebug(ctx, fmt, ...)                                          \
160 	astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)
161 
162 /* clang-format off */
163 #define ASTLPC_MCTP_MAGIC	0x4d435450
164 #define ASTLPC_VER_BAD	0
165 #define ASTLPC_VER_MIN	1
166 
167 /* Support testing of new binding protocols */
168 #ifndef ASTLPC_VER_CUR
169 #define ASTLPC_VER_CUR	3
170 #endif
171 /* clang-format on */
172 
173 #ifndef ARRAY_SIZE
174 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
175 #endif
176 
177 static uint32_t astlpc_packet_size_v1(uint32_t body)
178 {
179 	assert((body + 4) > body);
180 
181 	return body + 4;
182 }
183 
184 static uint32_t astlpc_body_size_v1(uint32_t packet)
185 {
186 	assert((packet - 4) < packet);
187 
188 	return packet - 4;
189 }
190 
191 void astlpc_pktbuf_protect_v1(struct mctp_pktbuf *pkt)
192 {
193 	(void)pkt;
194 }
195 
196 bool astlpc_pktbuf_validate_v1(struct mctp_pktbuf *pkt)
197 {
198 	(void)pkt;
199 	return true;
200 }
201 
202 static uint32_t astlpc_packet_size_v3(uint32_t body)
203 {
204 	assert((body + 4 + 4) > body);
205 
206 	return body + 4 + 4;
207 }
208 
209 static uint32_t astlpc_body_size_v3(uint32_t packet)
210 {
211 	assert((packet - 4 - 4) < packet);
212 
213 	return packet - 4 - 4;
214 }
215 
216 void astlpc_pktbuf_protect_v3(struct mctp_pktbuf *pkt)
217 {
218 	uint32_t code;
219 
220 	code = htobe32(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt)));
221 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
222 	mctp_pktbuf_push(pkt, &code, 4);
223 }
224 
225 bool astlpc_pktbuf_validate_v3(struct mctp_pktbuf *pkt)
226 {
227 	uint32_t code;
228 	void *check;
229 
230 	code = be32toh(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt) - 4));
231 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
232 	check = mctp_pktbuf_pop(pkt, 4);
233 	return check && !memcmp(&code, check, 4);
234 }
235 
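/*
 * Protocol operations, indexed by negotiated protocol version. Versions 1 and
 * 2 share the same packet framing (a 4-byte length header); version 3 adds a
 * 4-byte CRC32 trailer that is generated on Tx and validated on Rx.
 */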
236 static const struct mctp_astlpc_protocol astlpc_protocol_version[] = {
237 	[0] = {
238 		.version = 0,
239 		.packet_size = NULL,
240 		.body_size = NULL,
241 		.pktbuf_protect = NULL,
242 		.pktbuf_validate = NULL,
243 	},
244 	[1] = {
245 		.version = 1,
246 		.packet_size = astlpc_packet_size_v1,
247 		.body_size = astlpc_body_size_v1,
248 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
249 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
250 	},
251 	[2] = {
252 		.version = 2,
253 		.packet_size = astlpc_packet_size_v1,
254 		.body_size = astlpc_body_size_v1,
255 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
256 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
257 	},
258 	[3] = {
259 		.version = 3,
260 		.packet_size = astlpc_packet_size_v3,
261 		.body_size = astlpc_body_size_v3,
262 		.pktbuf_protect = astlpc_pktbuf_protect_v3,
263 		.pktbuf_validate = astlpc_pktbuf_validate_v3,
264 	},
265 };
266 
267 struct mctp_lpcmap_hdr {
268 	uint32_t magic;
269 
270 	uint16_t bmc_ver_min;
271 	uint16_t bmc_ver_cur;
272 	uint16_t host_ver_min;
273 	uint16_t host_ver_cur;
274 	uint16_t negotiated_ver;
275 	uint16_t pad0;
276 
277 	struct {
278 		uint32_t rx_offset;
279 		uint32_t rx_size;
280 		uint32_t tx_offset;
281 		uint32_t tx_size;
282 	} layout;
283 } __attribute__((packed));
284 
285 static const uint32_t control_size = 0x100;
286 
287 #define LPC_WIN_SIZE (1 * 1024 * 1024)
288 
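/*
 * Status register bits: BMC_READY and CHANNEL_ACTIVE are software-defined
 * flags published by the BMC, while IBF and OBF are the standard KCS
 * input/output buffer-full flags used to pace single-byte command transfers.
 */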
289 #define KCS_STATUS_BMC_READY	  0x80
290 #define KCS_STATUS_CHANNEL_ACTIVE 0x40
291 #define KCS_STATUS_IBF		  0x02
292 #define KCS_STATUS_OBF		  0x01
293 
294 static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
295 					enum mctp_binding_astlpc_kcs_reg reg,
296 					uint8_t val)
297 {
298 	return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
299 }
300 
301 static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
302 				       enum mctp_binding_astlpc_kcs_reg reg,
303 				       uint8_t *val)
304 {
305 	return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
306 }
307 
308 static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
309 					const void *buf, long offset,
310 					size_t len)
311 {
312 	astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);
313 
314 	assert(offset >= 0);
315 
316 	/* Indirect access */
317 	if (astlpc->ops.lpc_write) {
318 		void *data = astlpc->ops_data;
319 
320 		return astlpc->ops.lpc_write(data, buf, offset, len);
321 	}
322 
323 	/* Direct mapping */
324 	assert(astlpc->lpc_map);
325 	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);
326 
327 	return 0;
328 }
329 
330 static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
331 				       void *buf, long offset, size_t len)
332 {
333 	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
334 		       offset);
335 
336 	assert(offset >= 0);
337 
338 	/* Indirect access */
339 	if (astlpc->ops.lpc_read) {
340 		void *data = astlpc->ops_data;
341 
342 		return astlpc->ops.lpc_read(data, buf, offset, len);
343 	}
344 
345 	/* Direct mapping */
346 	assert(astlpc->lpc_map);
347 	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);
348 
349 	return 0;
350 }
351 
352 static void
353 mctp_astlpc_kcs_print_status_write(struct mctp_binding_astlpc *astlpc,
354 				   uint8_t status)
355 {
356 	astlpc_prnotice(
357 		astlpc, "Binding state is 0x%hhx: BMC %s, Channel %s, OBF %s",
358 		status, status & KCS_STATUS_BMC_READY ? "active" : "inactive",
359 		status & KCS_STATUS_CHANNEL_ACTIVE ? "active" : "inactive",
360 		status & KCS_STATUS_OBF ? "preserved" : "cleared");
361 }
362 
363 static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
364 				      uint8_t status)
365 {
366 	uint8_t data;
367 	int rc;
368 
369 	/* Since we're setting the status register, we want the other endpoint
370 	 * to be interrupted. However, some hardware may only raise a host-side
371 	 * interrupt on an ODR event.
372 	 * So, write a dummy value of 0xff to ODR to ensure that an interrupt
373 	 * is triggered; the host can simply ignore the dummy value.
374 	 */
375 	data = cmd_dummy_value;
376 
377 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
378 	if (rc) {
379 		astlpc_prwarn(astlpc, "KCS status write failed");
380 		return -1;
381 	}
382 
383 	mctp_astlpc_kcs_print_status_write(astlpc, status);
384 
385 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
386 	if (rc) {
387 		astlpc_prwarn(astlpc, "KCS dummy data write failed");
388 		return -1;
389 	}
390 
391 	return 0;
392 }
393 
394 static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
395 				   struct mctp_astlpc_layout *layout)
396 {
397 	struct mctp_lpcmap_hdr hdr;
398 	int rc;
399 
400 	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
401 	if (rc < 0)
402 		return rc;
403 
404 	/* Flip the buffers as the names are defined in terms of the host */
405 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
406 		layout->rx.offset = be32toh(hdr.layout.tx_offset);
407 		layout->rx.size = be32toh(hdr.layout.tx_size);
408 		layout->tx.offset = be32toh(hdr.layout.rx_offset);
409 		layout->tx.size = be32toh(hdr.layout.rx_size);
410 	} else {
411 		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
412 
413 		layout->rx.offset = be32toh(hdr.layout.rx_offset);
414 		layout->rx.size = be32toh(hdr.layout.rx_size);
415 		layout->tx.offset = be32toh(hdr.layout.tx_offset);
416 		layout->tx.size = be32toh(hdr.layout.tx_size);
417 	}
418 
419 	return 0;
420 }
421 
422 static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
423 				    struct mctp_astlpc_layout *layout)
424 {
425 	uint32_t rx_size_be;
426 
427 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
428 		struct mctp_lpcmap_hdr hdr;
429 
430 		/*
431 		 * Flip the buffers as the names are defined in terms of the
432 		 * host
433 		 */
434 		hdr.layout.rx_offset = htobe32(layout->tx.offset);
435 		hdr.layout.rx_size = htobe32(layout->tx.size);
436 		hdr.layout.tx_offset = htobe32(layout->rx.offset);
437 		hdr.layout.tx_size = htobe32(layout->rx.size);
438 
439 		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
440 					     offsetof(struct mctp_lpcmap_hdr,
441 						      layout),
442 					     sizeof(hdr.layout));
443 	}
444 
445 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
446 
447 	/*
448 	 * As of v2 we only need to write rx_size - the offsets are controlled
449 	 * by the BMC, as is the BMC's rx_size (host tx_size).
450 	 */
451 	rx_size_be = htobe32(layout->rx.size);
452 	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
453 				     offsetof(struct mctp_lpcmap_hdr,
454 					      layout.rx_size),
455 				     sizeof(rx_size_be));
456 }
457 
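/*
 * Sanity-check a single packet buffer description: no arithmetic overflow, it
 * fits inside the LPC window, it can hold at least a baseline transmission
 * unit, and it does not overlap the control region.
 */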
458 static bool
459 mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc *astlpc,
460 			    const struct mctp_astlpc_buffer *buf,
461 			    const char *name)
462 {
463 	/* Check for overflow */
464 	if (buf->offset + buf->size < buf->offset) {
465 		mctp_prerr(
466 			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
467 			", size: %" PRIu32,
468 			name, buf->offset, buf->size);
469 		return false;
470 	}
471 
472 	/* Check that the buffers are contained within the allocated space */
473 	if (buf->offset + buf->size > LPC_WIN_SIZE) {
474 		mctp_prerr(
475 			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
476 			", size: %" PRIu32,
477 			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
478 			buf->size);
479 		return false;
480 	}
481 
482 	/* Check that the baseline transmission unit is supported */
483 	if (buf->size <
484 	    astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU))) {
485 		mctp_prerr(
486 			"%s packet buffer too small: Require %" PRIu32
487 			" bytes to support the %u byte baseline transmission unit, found %" PRIu32,
488 			name,
489 			astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU)),
490 			MCTP_BTU, buf->size);
491 		return false;
492 	}
493 
494 	/* Check for overlap with the control space */
495 	if (buf->offset < control_size) {
496 		mctp_prerr(
497 			"%s packet buffer overlaps control region {0x%" PRIx32
498 			", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
499 			name, 0U, control_size, buf->offset, buf->size);
500 		return false;
501 	}
502 
503 	return true;
504 }
505 
506 static bool
507 mctp_astlpc_layout_validate(const struct mctp_binding_astlpc *astlpc,
508 			    const struct mctp_astlpc_layout *layout)
509 {
510 	const struct mctp_astlpc_buffer *rx = &layout->rx;
511 	const struct mctp_astlpc_buffer *tx = &layout->tx;
512 	bool rx_valid, tx_valid;
513 
514 	rx_valid = mctp_astlpc_buffer_validate(astlpc, rx, "Rx");
515 	tx_valid = mctp_astlpc_buffer_validate(astlpc, tx, "Tx");
516 
517 	if (!(rx_valid && tx_valid))
518 		return false;
519 
520 	/* Check that the buffers are disjoint */
521 	if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
522 	    (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
523 		mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
524 			   ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
525 			   rx->offset, rx->size, tx->offset, tx->size);
526 		return false;
527 	}
528 
529 	return true;
530 }
531 
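/*
 * BMC-side initialisation: carve the window beyond the control region into
 * two equally-sized packet buffers, publish the layout and supported version
 * range in the control area, then raise BMC_READY over KCS.
 */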
532 static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
533 {
534 	struct mctp_lpcmap_hdr hdr = { 0 };
535 	uint8_t status;
536 	uint32_t sz;
537 
538 	/*
539 	 * The largest buffer size is half of the allocated MCTP space
540 	 * excluding the control space.
541 	 */
542 	sz = ((LPC_WIN_SIZE - control_size) / 2);
543 
544 	/*
545 	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
546 	 * Query Hop in DSP0236 v1.3.0.
547 	 */
548 	sz = MCTP_BODY_SIZE(astlpc->proto->body_size(sz));
549 	sz &= ~0xfUL;
550 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(sz));
551 
552 	if (astlpc->requested_mtu) {
553 		uint32_t rpkt, rmtu;
554 
555 		rmtu = astlpc->requested_mtu;
556 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
557 		sz = MIN(sz, rpkt);
558 	}
559 
560 	/* Flip the buffers as the names are defined in terms of the host */
561 	astlpc->layout.tx.offset = control_size;
562 	astlpc->layout.tx.size = sz;
563 	astlpc->layout.rx.offset =
564 		astlpc->layout.tx.offset + astlpc->layout.tx.size;
565 	astlpc->layout.rx.size = sz;
566 
567 	if (!mctp_astlpc_layout_validate(astlpc, &astlpc->layout)) {
568 		astlpc_prerr(astlpc, "Cannot support an MTU of %" PRIu32, sz);
569 		return -EINVAL;
570 	}
571 
572 	hdr = (struct mctp_lpcmap_hdr){
573 		.magic = htobe32(ASTLPC_MCTP_MAGIC),
574 		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
575 		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),
576 
577 		/* Flip the buffers back as we're now describing the host's
578 		 * configuration to the host */
579 		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
580 		.layout.rx_size = htobe32(astlpc->layout.tx.size),
581 		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
582 		.layout.tx_size = htobe32(astlpc->layout.rx.size),
583 	};
584 
585 	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));
586 
587 	/*
588 	 * Set status indicating that the BMC is now active. Be explicit about
589 	 * clearing OBF; we're reinitialising the binding and so any previous
590 	 * buffer state is irrelevant.
591 	 */
592 	status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF;
593 	return mctp_astlpc_kcs_set_status(astlpc, status);
594 }
595 
596 static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
597 {
598 	struct mctp_binding_astlpc *astlpc =
599 		container_of(b, struct mctp_binding_astlpc, binding);
600 
601 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_CUR];
602 
603 	return mctp_astlpc_init_bmc(astlpc);
604 }
605 
606 static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
607 					 uint16_t bmc_ver_cur,
608 					 uint16_t host_ver_min,
609 					 uint16_t host_ver_cur)
610 {
611 	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
612 		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
613 			   "], [%" PRIu16 ", %" PRIu16 "]",
614 			   bmc_ver_min, bmc_ver_cur, host_ver_min,
615 			   host_ver_cur);
616 		return false;
617 	} else if (bmc_ver_min > bmc_ver_cur) {
618 		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
619 			   "]",
620 			   bmc_ver_min, bmc_ver_cur);
621 		return false;
622 	} else if (host_ver_min > host_ver_cur) {
623 		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
624 			   "]",
625 			   host_ver_min, host_ver_cur);
626 		return false;
627 	} else if ((host_ver_cur < bmc_ver_min) ||
628 		   (host_ver_min > bmc_ver_cur)) {
629 		mctp_prerr(
630 			"Unable to satisfy version negotiation with ranges [%" PRIu16
631 			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
632 			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
633 		return false;
634 	}
635 
636 	return true;
637 }
638 
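/*
 * v2+ host-side layout negotiation: read the layout proposed by the BMC,
 * request an Rx buffer sized for our desired MTU, and write the result back
 * for the BMC to arbitrate.
 */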
639 static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
640 {
641 	struct mctp_astlpc_layout layout;
642 	uint32_t rmtu;
643 	uint32_t sz;
644 	int rc;
645 
646 	rc = mctp_astlpc_layout_read(astlpc, &layout);
647 	if (rc < 0)
648 		return rc;
649 
650 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
651 		astlpc_prerr(
652 			astlpc,
653 			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
654 			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
655 			layout.rx.offset, layout.rx.size, layout.tx.offset,
656 			layout.tx.size);
657 		return -EINVAL;
658 	}
659 
660 	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
661 		      astlpc->requested_mtu);
662 
663 	rmtu = astlpc->requested_mtu;
664 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
665 	layout.rx.size = sz;
666 
667 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
668 		astlpc_prerr(
669 			astlpc,
670 			"Generated invalid buffer layout with size %" PRIu32
671 			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
672 			", %" PRIu32 "}",
673 			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
674 			layout.tx.size);
675 		return -EINVAL;
676 	}
677 
678 	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
679 		      astlpc->requested_mtu);
680 
681 	return mctp_astlpc_layout_write(astlpc, &layout);
682 }
683 
684 static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
685 					      uint16_t bmc_ver_cur,
686 					      uint16_t host_ver_min,
687 					      uint16_t host_ver_cur)
688 {
689 	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
690 					  host_ver_min, host_ver_cur))
691 		return ASTLPC_VER_BAD;
692 
693 	if (bmc_ver_cur < host_ver_cur)
694 		return bmc_ver_cur;
695 
696 	return host_ver_cur;
697 }
698 
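/*
 * Host-side initialisation: require BMC_READY, read the BMC's advertised
 * version range, pre-compute the expected negotiated version, optionally
 * perform v2+ layout negotiation, publish our own version range, then issue
 * the Channel Init (0x00) command. Negotiation only completes once the BMC
 * reports CHANNEL_ACTIVE and mctp_astlpc_finalise_channel() runs.
 */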
699 static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
700 {
701 	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
702 	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
703 	uint16_t bmc_ver_min, bmc_ver_cur, negotiated;
704 	struct mctp_lpcmap_hdr hdr;
705 	uint8_t status;
706 	int rc;
707 
708 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
709 	if (rc) {
710 		mctp_prwarn("KCS status read failed");
711 		return rc;
712 	}
713 
714 	astlpc->kcs_status = status;
715 
716 	if (!(status & KCS_STATUS_BMC_READY))
717 		return -EHOSTDOWN;
718 
719 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
720 
721 	bmc_ver_min = be16toh(hdr.bmc_ver_min);
722 	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);
723 
724 	/* Calculate the expected value of negotiated_ver */
725 	negotiated = mctp_astlpc_negotiate_version(
726 		bmc_ver_min, bmc_ver_cur, ASTLPC_VER_MIN, ASTLPC_VER_CUR);
727 	if (!negotiated) {
728 		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
729 		return -EINVAL;
730 	}
731 
732 	/* Assign protocol ops so we can calculate the packet buffer sizes */
733 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
734 	astlpc->proto = &astlpc_protocol_version[negotiated];
735 
736 	/* Negotiate packet buffers in v2 style if the BMC supports it */
737 	if (negotiated >= 2) {
738 		rc = mctp_astlpc_negotiate_layout_host(astlpc);
739 		if (rc < 0)
740 			return rc;
741 	}
742 
743 	/* Advertise the host's supported protocol versions */
744 	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
745 			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
746 			      sizeof(ver_min_be));
747 
748 	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
749 			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
750 			      sizeof(ver_cur_be));
751 
752 	/* Send channel init command */
753 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
754 	if (rc) {
755 		astlpc_prwarn(astlpc, "KCS write failed");
756 	}
757 
758 	/*
759 	 * Configure the host so `astlpc->proto->version == 0` holds until we
760 	 * receive a subsequent status update from the BMC. Until then,
761 	 * `astlpc->proto->version == 0` indicates that we're yet to complete
762 	 * the channel initialisation handshake.
763 	 *
764 	 * When the BMC provides a status update with KCS_STATUS_CHANNEL_ACTIVE
765 	 * set we will assign the appropriate protocol ops struct in accordance
766 	 * with `negotiated_ver`.
767 	 */
768 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_BAD];
769 
770 	return rc;
771 }
772 
773 static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
774 {
775 	struct mctp_binding_astlpc *astlpc =
776 		container_of(b, struct mctp_binding_astlpc, binding);
777 
778 	return mctp_astlpc_init_host(astlpc);
779 }
780 
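/*
 * Determine whether the status register indicates we may read from or write
 * to the KCS data register. Which hardware flag applies depends on both our
 * role and the transfer direction: the host's writes land in IBF and its
 * reads drain OBF, while the BMC sees the inverse, hence the XOR below.
 */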
781 static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
782 				    uint8_t status, bool is_write)
783 {
784 	bool is_bmc;
785 	bool ready_state;
786 	uint8_t flag;
787 
788 	is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
789 	flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
790 	ready_state = is_write ? 0 : 1;
791 
792 	return !!(status & flag) == ready_state;
793 }
794 
795 static inline bool
796 mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
797 {
798 	return __mctp_astlpc_kcs_ready(astlpc, status, false);
799 }
800 
801 static inline bool
802 mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
803 {
804 	return __mctp_astlpc_kcs_ready(astlpc, status, true);
805 }
806 
807 static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
808 				enum mctp_astlpc_cmd data)
809 {
810 	uint8_t status;
811 	int rc;
812 
813 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
814 	if (rc) {
815 		astlpc_prwarn(astlpc, "KCS status read failed");
816 		return -EIO;
817 	}
818 	if (!mctp_astlpc_kcs_write_ready(astlpc, status))
819 		return -EBUSY;
820 
821 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
822 	if (rc) {
823 		astlpc_prwarn(astlpc, "KCS data write failed");
824 		return -EIO;
825 	}
826 
827 	return 0;
828 }
829 
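/*
 * Tx path: write the 4-byte big-endian body length at the start of the Tx
 * buffer, apply any medium-specific protection (v3 CRC32), copy the packet in
 * at offset 4, then hand the buffer to the remote side with "Tx Begin". A busy
 * KCS interface is not an error; the command is retried from
 * mctp_astlpc_poll() while the buffer remains in the "prepared" state.
 */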
830 static int mctp_binding_astlpc_tx(struct mctp_binding *b,
831 				  struct mctp_pktbuf *pkt)
832 {
833 	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
834 	uint32_t len, len_be;
835 	struct mctp_hdr *hdr;
836 	int rc;
837 
838 	hdr = mctp_pktbuf_hdr(pkt);
839 	len = mctp_pktbuf_size(pkt);
840 
841 	astlpc_prdebug(astlpc,
842 		       "%s: Transmitting %" PRIu32
843 		       "-byte packet (%hhu, %hhu, 0x%hhx)",
844 		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);
845 
846 	if (len > astlpc->proto->body_size(astlpc->layout.tx.size)) {
847 		astlpc_prwarn(astlpc, "invalid TX len %" PRIu32 ": %" PRIu32,
848 			      len,
849 			      astlpc->proto->body_size(astlpc->layout.tx.size));
850 		return -EMSGSIZE;
851 	}
852 
853 	mctp_binding_set_tx_enabled(b, false);
854 
855 	len_be = htobe32(len);
856 	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
857 			      sizeof(len_be));
858 
859 	astlpc->proto->pktbuf_protect(pkt);
860 	len = mctp_pktbuf_size(pkt);
861 
862 	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);
863 
864 	astlpc->layout.tx.state = buffer_state_prepared;
865 
866 	rc = mctp_astlpc_kcs_send(astlpc, cmd_tx_begin);
867 	if (!rc)
868 		astlpc->layout.tx.state = buffer_state_released;
869 
870 	return rc == -EBUSY ? 0 : rc;
871 }
872 
873 /* Update binding pkt_size and reallocate tx_storage */
874 static int mctp_astlpc_set_pkt_size(struct mctp_binding_astlpc *astlpc,
875 				    size_t pkt_size)
876 {
877 	size_t body = MCTP_BODY_SIZE(pkt_size);
878 	body += astlpc->binding.pkt_header + astlpc->binding.pkt_trailer;
879 	size_t pktbuf_size = MCTP_PKTBUF_SIZE(body);
880 	/* Reallocate TX storage */
881 	if (astlpc->binding.tx_storage) {
882 		__mctp_free(astlpc->binding.tx_storage);
883 	}
884 	astlpc->binding.tx_storage = __mctp_alloc(pktbuf_size);
885 	if (!astlpc->binding.tx_storage) {
886 		return -ENOMEM;
887 	}
888 
889 	astlpc->binding.pkt_size = pkt_size;
890 	return 0;
891 }
892 
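/*
 * Derive the MTU to use for both directions. The buffers sit back-to-back
 * after the control region, so the gap between their offsets bounds the
 * largest packet the BMC can host; that is further clamped by any MTU the BMC
 * was configured to request and by the Rx buffer size the host proposed.
 */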
893 static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
894 					  struct mctp_astlpc_layout *layout)
895 {
896 	uint32_t low, high, limit, rpkt;
897 
898 	/* Derive the largest MTU the BMC _can_ support */
899 	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
900 	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
901 	limit = high - low;
902 
903 	/* Determine the largest MTU the BMC _wants_ to support */
904 	if (astlpc->requested_mtu) {
905 		uint32_t rmtu = astlpc->requested_mtu;
906 
907 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
908 		limit = MIN(limit, rpkt);
909 	}
910 
911 	/* Determine the accepted MTU, applied both directions by convention */
912 	rpkt = MIN(limit, layout->tx.size);
913 	return MCTP_BODY_SIZE(astlpc->proto->body_size(rpkt));
914 }
915 
916 static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
917 {
918 	struct mctp_astlpc_layout proposed, pending;
919 	uint32_t sz, mtu;
920 	int rc;
921 
922 	/* Do we have a valid protocol version? */
923 	if (!astlpc->proto->version)
924 		return -EINVAL;
925 
926 	/* Extract the host's proposed layout */
927 	rc = mctp_astlpc_layout_read(astlpc, &proposed);
928 	if (rc < 0)
929 		return rc;
930 
931 	/* Do we have a reasonable layout? */
932 	if (!mctp_astlpc_layout_validate(astlpc, &proposed))
933 		return -EINVAL;
934 
935 	/* Negotiate the MTU */
936 	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
937 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(mtu));
938 
939 	/*
940 	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
941 	 * functions
942 	 */
943 	pending = astlpc->layout;
944 	pending.tx.size = sz;
945 	pending.rx.size = sz;
946 
947 	if (mctp_astlpc_layout_validate(astlpc, &pending)) {
948 		/* We found a sensible Rx MTU, so honour it */
949 		astlpc->layout = pending;
950 
951 		/* Enforce the negotiated MTU */
952 		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
953 		if (rc < 0)
954 			return rc;
955 
956 		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
957 			      mtu);
958 	} else {
959 		astlpc_prwarn(astlpc, "MTU negotiation failed");
960 		return -EINVAL;
961 	}
962 
963 	if (astlpc->proto->version >= 2) {
964 		rc = mctp_astlpc_set_pkt_size(astlpc, MCTP_PACKET_SIZE(mtu));
965 		if (rc) {
966 			astlpc_prwarn(astlpc, "Allocation error");
967 			return rc;
968 		}
969 	}
970 
971 	return 0;
972 }
973 
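/*
 * BMC handling of the host's Channel Init command: negotiate the protocol
 * version against the host's advertised range, arbitrate the buffer layout,
 * publish negotiated_ver in the control area, reset buffer ownership, and
 * finally report CHANNEL_ACTIVE (or not, on failure) via the status register.
 */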
974 static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
975 {
976 	uint16_t negotiated, negotiated_be;
977 	struct mctp_lpcmap_hdr hdr;
978 	uint8_t status;
979 	int rc;
980 
981 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
982 
983 	/* Version negotiation */
984 	negotiated = mctp_astlpc_negotiate_version(ASTLPC_VER_MIN,
985 						   ASTLPC_VER_CUR,
986 						   be16toh(hdr.host_ver_min),
987 						   be16toh(hdr.host_ver_cur));
988 
989 	/* MTU negotiation requires knowing which protocol we'll use */
990 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
991 	astlpc->proto = &astlpc_protocol_version[negotiated];
992 
993 	/* Host Rx MTU negotiation: Failure terminates channel init */
994 	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
995 	if (rc < 0)
996 		negotiated = ASTLPC_VER_BAD;
997 
998 	/* Populate the negotiated version */
999 	negotiated_be = htobe16(negotiated);
1000 	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
1001 			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
1002 			      sizeof(negotiated_be));
1003 
1004 	/* Track buffer ownership */
1005 	astlpc->layout.tx.state = buffer_state_acquired;
1006 	astlpc->layout.rx.state = buffer_state_released;
1007 
1008 	/* Finalise the configuration */
1009 	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
1010 	if (negotiated > 0) {
1011 		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
1012 			      negotiated);
1013 		status |= KCS_STATUS_CHANNEL_ACTIVE;
1014 	} else {
1015 		astlpc_prerr(astlpc, "Failed to initialise channel");
1016 	}
1017 
1018 	mctp_astlpc_kcs_set_status(astlpc, status);
1019 
1020 	mctp_binding_set_tx_enabled(&astlpc->binding,
1021 				    status & KCS_STATUS_CHANNEL_ACTIVE);
1022 }
1023 
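/*
 * Rx path, run when the remote side signals "Tx Begin": read and bound-check
 * the 4-byte length, copy the packet body (and any trailer) out of the shared
 * window, return the buffer with "Rx Complete" before processing, then verify
 * the MCTP version and medium-specific integrity data before delivery.
 */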
1024 static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
1025 {
1026 	struct mctp_pktbuf *pkt;
1027 	struct mctp_hdr *hdr;
1028 	uint32_t body, packet;
1029 
1030 	mctp_astlpc_lpc_read(astlpc, &body, astlpc->layout.rx.offset,
1031 			     sizeof(body));
1032 	body = be32toh(body);
1033 
1034 	if (body > astlpc->proto->body_size(astlpc->layout.rx.size)) {
1035 		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
1036 		return;
1037 	}
1038 
1039 	if ((size_t)body > astlpc->binding.pkt_size) {
1040 		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
1041 		return;
1042 	}
1043 
1044 	/* Eliminate the medium-specific header that we just read */
1045 	packet = astlpc->proto->packet_size(body) - 4;
1046 	pkt = mctp_pktbuf_alloc(&astlpc->binding, packet);
1047 	if (!pkt) {
1048 		astlpc_prwarn(astlpc, "unable to allocate pktbuf len 0x%x",
1049 			      packet);
1050 		return;
1051 	}
1052 
1053 	/*
1054 	 * Read payload and medium-specific trailer from immediately after the
1055 	 * medium-specific header.
1056 	 */
1057 	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
1058 			     astlpc->layout.rx.offset + 4, packet);
1059 
1060 	astlpc->layout.rx.state = buffer_state_prepared;
1061 
1062 	/* Inform the other side of the MCTP interface that we have read
1063 	 * the packet off the bus before handling the contents of the packet.
1064 	 */
1065 	if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
1066 		astlpc->layout.rx.state = buffer_state_released;
1067 
1068 	hdr = mctp_pktbuf_hdr(pkt);
1069 	if (hdr->ver != 1) {
1070 		mctp_pktbuf_free(pkt);
1071 		astlpc_prdebug(astlpc, "Dropped packet with invalid version");
1072 		return;
1073 	}
1074 
1075 	/*
1076 	 * v3 will validate the CRC32 in the medium-specific trailer and adjust
1077 	 * the packet size accordingly. On older protocols validation is a no-op
1078 	 * that always returns true.
1079 	 */
1080 	if (astlpc->proto->pktbuf_validate(pkt)) {
1081 		mctp_bus_rx(&astlpc->binding, pkt);
1082 	} else {
1083 		/* TODO: Drop any associated assembly */
1084 		astlpc_prdebug(astlpc, "Dropped corrupt packet");
1085 	}
1086 	mctp_pktbuf_free(pkt);
1087 }
1088 
1089 static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
1090 {
1091 	astlpc->layout.tx.state = buffer_state_acquired;
1092 	mctp_binding_set_tx_enabled(&astlpc->binding, true);
1093 }
1094 
1095 static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
1096 {
1097 	struct mctp_astlpc_layout layout;
1098 	uint16_t negotiated;
1099 	int rc;
1100 
1101 	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
1102 				  offsetof(struct mctp_lpcmap_hdr,
1103 					   negotiated_ver),
1104 				  sizeof(negotiated));
1105 	if (rc < 0)
1106 		return rc;
1107 
1108 	negotiated = be16toh(negotiated);
1109 	astlpc_prerr(astlpc, "Version negotiation got: %u", negotiated);
1110 
1111 	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
1112 	    negotiated > ASTLPC_VER_CUR) {
1113 		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
1114 			     negotiated);
1115 		return -EINVAL;
1116 	}
1117 
1118 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
1119 	astlpc->proto = &astlpc_protocol_version[negotiated];
1120 
1121 	rc = mctp_astlpc_layout_read(astlpc, &layout);
1122 	if (rc < 0)
1123 		return rc;
1124 
1125 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
1126 		mctp_prerr("BMC proposed invalid buffer parameters");
1127 		return -EINVAL;
1128 	}
1129 
1130 	astlpc->layout = layout;
1131 
1132 	if (negotiated >= 2)
1133 		astlpc->binding.pkt_size =
1134 			astlpc->proto->body_size(astlpc->layout.tx.size);
1135 
1136 	/* Track buffer ownership */
1137 	astlpc->layout.tx.state = buffer_state_acquired;
1138 	astlpc->layout.rx.state = buffer_state_released;
1139 
1140 	return 0;
1141 }
1142 
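/*
 * Host-side reaction to a BMC status change: a rising BMC_READY restarts the
 * binding, a falling one shuts the channel down, and any update while
 * negotiation is incomplete, or any change to CHANNEL_ACTIVE, triggers
 * (re-)finalisation of the negotiated parameters.
 */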
1143 static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
1144 				      uint8_t status)
1145 {
1146 	uint8_t updated;
1147 	int rc = 0;
1148 
1149 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
1150 
1151 	updated = astlpc->kcs_status ^ status;
1152 
1153 	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
1154 		       status, updated);
1155 
1156 	if (updated & KCS_STATUS_BMC_READY) {
1157 		if (status & KCS_STATUS_BMC_READY) {
1158 			astlpc->kcs_status = status;
1159 			return astlpc->binding.start(&astlpc->binding);
1160 		} else {
1161 			/* Shut down the channel */
1162 			astlpc->layout.rx.state = buffer_state_idle;
1163 			astlpc->layout.tx.state = buffer_state_idle;
1164 			mctp_binding_set_tx_enabled(&astlpc->binding, false);
1165 		}
1166 	}
1167 
1168 	if (astlpc->proto->version == 0 ||
1169 	    updated & KCS_STATUS_CHANNEL_ACTIVE) {
1170 		bool enable;
1171 
1172 		astlpc->layout.rx.state = buffer_state_idle;
1173 		astlpc->layout.tx.state = buffer_state_idle;
1174 		rc = mctp_astlpc_finalise_channel(astlpc);
1175 		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;
1176 		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
1177 	}
1178 
1179 	astlpc->kcs_status = status;
1180 
1181 	return rc;
1182 }
1183 
1184 bool mctp_astlpc_tx_done(struct mctp_binding_astlpc *astlpc)
1185 {
1186 	return astlpc->layout.tx.state == buffer_state_acquired;
1187 }
1188 
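/*
 * Main polling entry point: first retry any deferred buffer-ownership
 * commands, then consume at most one pending KCS event and dispatch it to the
 * channel-init, Rx, Tx-complete or status-update handlers as appropriate.
 */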
1189 int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
1190 {
1191 	uint8_t status, data;
1192 	int rc;
1193 
1194 	if (astlpc->layout.rx.state == buffer_state_prepared)
1195 		if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
1196 			astlpc->layout.rx.state = buffer_state_released;
1197 
1198 	if (astlpc->layout.tx.state == buffer_state_prepared)
1199 		if (!mctp_astlpc_kcs_send(astlpc, cmd_tx_begin))
1200 			astlpc->layout.tx.state = buffer_state_released;
1201 
1202 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
1203 	if (rc) {
1204 		astlpc_prwarn(astlpc, "KCS read error");
1205 		return -1;
1206 	}
1207 
1208 	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);
1209 
1210 	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
1211 		return 0;
1212 
1213 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
1214 	if (rc) {
1215 		astlpc_prwarn(astlpc, "KCS data read error");
1216 		return -1;
1217 	}
1218 
1219 	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);
1220 
1221 	if (!astlpc->proto->version &&
1222 	    !(data == cmd_initialise || data == cmd_dummy_value)) {
1223 		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
1224 			      data);
1225 		return 0;
1226 	}
1227 
1228 	switch (data) {
1229 	case cmd_initialise:
1230 		mctp_astlpc_init_channel(astlpc);
1231 		break;
1232 	case cmd_tx_begin:
1233 		if (astlpc->layout.rx.state != buffer_state_released) {
1234 			astlpc_prerr(
1235 				astlpc,
1236 				"Protocol error: Invalid Rx buffer state for event %d: %d\n",
1237 				data, astlpc->layout.rx.state);
1238 			return 0;
1239 		}
1240 		mctp_astlpc_rx_start(astlpc);
1241 		break;
1242 	case cmd_rx_complete:
1243 		if (astlpc->layout.tx.state != buffer_state_released) {
1244 			astlpc_prerr(
1245 				astlpc,
1246 				"Protocol error: Invalid Tx buffer state for event %d: %d\n",
1247 				data, astlpc->layout.tx.state);
1248 			return 0;
1249 		}
1250 		mctp_astlpc_tx_complete(astlpc);
1251 		break;
1252 	case cmd_dummy_value:
1253 		/* No responsibilities for the BMC on 0xff */
1254 		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1255 			rc = mctp_astlpc_update_channel(astlpc, status);
1256 			if (rc < 0)
1257 				return rc;
1258 		}
1259 		break;
1260 	default:
1261 		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
1262 	}
1263 
1264 	/* Handle silent loss of bmc-ready */
1265 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1266 		if (!(status & KCS_STATUS_BMC_READY && data == cmd_dummy_value))
1267 			return mctp_astlpc_update_channel(astlpc, status);
1268 	}
1269 
1270 	return rc;
1271 }
1272 
1273 /* allocate and basic initialisation */
1274 static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
1275 						      uint32_t mtu)
1276 {
1277 	struct mctp_binding_astlpc *astlpc;
1278 
1279 	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
1280 	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));
1281 
1282 	astlpc = __mctp_alloc(sizeof(*astlpc));
1283 	if (!astlpc)
1284 		return NULL;
1285 
1286 	memset(astlpc, 0, sizeof(*astlpc));
1287 	astlpc->mode = mode;
1288 	astlpc->lpc_map = NULL;
1289 	astlpc->layout.rx.state = buffer_state_idle;
1290 	astlpc->layout.tx.state = buffer_state_idle;
1291 	astlpc->requested_mtu = mtu;
1292 	astlpc->binding.name = "astlpc";
1293 	astlpc->binding.version = 1;
1294 	astlpc->binding.pkt_header = 4;
1295 	astlpc->binding.pkt_trailer = 4;
1296 	astlpc->binding.tx = mctp_binding_astlpc_tx;
1297 	if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1298 		astlpc->binding.start = mctp_binding_astlpc_start_bmc;
1299 	else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
1300 		astlpc->binding.start = mctp_binding_astlpc_start_host;
1301 	else {
1302 		astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode);
1303 		__mctp_free(astlpc);
1304 		return NULL;
1305 	}
1306 
1307 	if (mctp_astlpc_set_pkt_size(
1308 		    astlpc,
1309 		    MCTP_PACKET_SIZE(mtu > MCTP_BTU ? mtu : MCTP_BTU)) != 0) {
1310 		astlpc_prerr(astlpc, "%s: Allocation error", __func__);
1311 		__mctp_free(astlpc);
1312 		return NULL;
1313 	}
1314 
1315 	return astlpc;
1316 }
1317 
1318 struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
1319 {
1320 	return &b->binding;
1321 }
1322 
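/*
 * Rough usage sketch (illustrative only; the ops wiring, lpc_map and eid below
 * are example values supplied by the caller, not requirements of this file):
 *
 *   struct mctp *mctp = mctp_init();
 *   struct mctp_binding_astlpc *astlpc =
 *           mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU,
 *                            lpc_map, &my_ops, my_ops_data);
 *   mctp_register_bus(mctp, mctp_binding_astlpc_core(astlpc), eid);
 *
 * after which the caller drives the binding by invoking mctp_astlpc_poll()
 * whenever the KCS interface signals activity.
 */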
1323 struct mctp_binding_astlpc *
1324 mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
1325 		 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
1326 {
1327 	struct mctp_binding_astlpc *astlpc;
1328 
1329 	if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
1330 	      mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
1331 		mctp_prerr("Unknown binding mode: %u", mode);
1332 		return NULL;
1333 	}
1334 
1335 	astlpc = __mctp_astlpc_init(mode, mtu);
1336 	if (!astlpc)
1337 		return NULL;
1338 
1339 	memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
1340 	astlpc->ops_data = ops_data;
1341 	astlpc->lpc_map = lpc_map;
1342 	astlpc->mode = mode;
1343 
1344 	return astlpc;
1345 }
1346 
1347 struct mctp_binding_astlpc *
1348 mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
1349 		     void *lpc_map)
1350 {
1351 	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
1352 				ops, ops_data);
1353 }
1354 
1355 void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
1356 {
1357 	/* Clear channel-active and bmc-ready */
1358 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1359 		mctp_astlpc_kcs_set_status(astlpc, 0);
1360 	__mctp_free(astlpc->binding.tx_storage);
1361 	__mctp_free(astlpc);
1362 }
1363 
1364 #ifdef MCTP_HAVE_FILEIO
1365 
1366 static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
1367 {
1368 	struct aspeed_lpc_ctrl_mapping map = {
1369 		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
1370 		.window_id = 0, /* There's only one */
1371 		.flags = 0,
1372 		.addr = 0,
1373 		.offset = 0,
1374 		.size = 0
1375 	};
1376 	void *lpc_map_base;
1377 	int fd, rc;
1378 
1379 	fd = open(lpc_path, O_RDWR | O_SYNC);
1380 	if (fd < 0) {
1381 		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
1382 		return -1;
1383 	}
1384 
1385 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
1386 	if (rc) {
1387 		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
1388 		close(fd);
1389 		return -1;
1390 	}
1391 
1392 	/*
1393 	 * 🚨🚨🚨
1394 	 *
1395 	 * Decouple ourselves from hiomapd[1] (another user of the FW2AHB) by
1396 	 * mapping the FW2AHB to the reserved memory here as well.
1397 	 *
1398 	 * It's not possible to use the MCTP ASTLPC binding on machines that
1399 	 * need the FW2AHB bridge mapped anywhere except to the reserved memory
1400 	 * (e.g. the host SPI NOR).
1401 	 *
1402 	 * [1] https://github.com/openbmc/hiomapd/
1403 	 *
1404 	 * 🚨🚨🚨
1405 	 *
1406 	 * The following calculation must align with what's going on in
1407 	 * hiomapd's lpc.c so as not to disrupt its behaviour:
1408 	 *
1409 	 * https://github.com/openbmc/hiomapd/blob/5ff50e3cbd7702aefc185264e4adfb9952040575/lpc.c#L68
1410 	 *
1411 	 * 🚨🚨🚨
1412 	 */
1413 
1414 	/* Map the reserved memory at the top of the 28-bit LPC firmware address space */
1415 	map.addr = 0x0FFFFFFF & -map.size;
1416 	astlpc_prinfo(
1417 		astlpc,
1418 		"Configuring FW2AHB to map reserved memory at 0x%08x for 0x%x in the LPC FW cycle address-space",
1419 		map.addr, map.size);
1420 
1421 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
1422 	if (rc) {
1423 		astlpc_prwarn(astlpc,
1424 			      "Failed to map FW2AHB to reserved memory");
1425 		close(fd);
1426 		return -1;
1427 	}
1428 
1429 	/* Map the reserved memory into our address space */
1430 	lpc_map_base =
1431 		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1432 	if (lpc_map_base == MAP_FAILED) {
1433 		astlpc_prwarn(astlpc, "LPC mmap failed");
1434 		rc = -1;
1435 	} else {
1436 		astlpc->lpc_map =
1437 			(uint8_t *)lpc_map_base + map.size - LPC_WIN_SIZE;
1438 	}
1439 
1440 	close(fd);
1441 
1442 	return rc;
1443 }
1444 
1445 static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc,
1446 				       const char *kcs_path)
1447 {
1448 	astlpc->kcs_fd = open(kcs_path, O_RDWR);
1449 	if (astlpc->kcs_fd < 0)
1450 		return -1;
1451 
1452 	return 0;
1453 }
1454 
1455 static int __mctp_astlpc_fileio_kcs_read(void *arg,
1456 					 enum mctp_binding_astlpc_kcs_reg reg,
1457 					 uint8_t *val)
1458 {
1459 	struct mctp_binding_astlpc *astlpc = arg;
1460 	off_t offset = reg;
1461 	int rc;
1462 
1463 	rc = pread(astlpc->kcs_fd, val, 1, offset);
1464 
1465 	return rc == 1 ? 0 : -1;
1466 }
1467 
1468 static int __mctp_astlpc_fileio_kcs_write(void *arg,
1469 					  enum mctp_binding_astlpc_kcs_reg reg,
1470 					  uint8_t val)
1471 {
1472 	struct mctp_binding_astlpc *astlpc = arg;
1473 	off_t offset = reg;
1474 	int rc;
1475 
1476 	rc = pwrite(astlpc->kcs_fd, &val, 1, offset);
1477 
1478 	return rc == 1 ? 0 : -1;
1479 }
1480 
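/*
 * Event-loop integration (a minimal sketch; timeout handling is up to the
 * caller):
 *
 *   struct pollfd pfd;
 *   mctp_astlpc_init_pollfd(astlpc, &pfd);
 *   if (poll(&pfd, 1, -1) > 0)
 *           mctp_astlpc_poll(astlpc);
 *
 * POLLOUT is requested while a buffer-ownership command is still pending so
 * that the deferred KCS write can be retried promptly.
 */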
1481 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc,
1482 			    struct pollfd *pollfd)
1483 {
1484 	bool release;
1485 
1486 	pollfd->fd = astlpc->kcs_fd;
1487 	pollfd->events = 0;
1488 
1489 	release = astlpc->layout.rx.state == buffer_state_prepared ||
1490 		  astlpc->layout.tx.state == buffer_state_prepared;
1491 
1492 	pollfd->events = release ? POLLOUT : POLLIN;
1493 
1494 	return 0;
1495 }
1496 
1497 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(const char *kcs_path)
1498 {
1499 	struct mctp_binding_astlpc *astlpc;
1500 	int rc;
1501 
1502 	/*
1503 	 * If we're doing file IO then we're very likely not running
1504 	 * freestanding, so let's assume that we're on the BMC side.
1505 	 *
1506 	 * Requesting an MTU of 0 requests the largest possible MTU, whatever
1507 	 * value that might take.
1508 	 */
1509 	astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0);
1510 	if (!astlpc)
1511 		return NULL;
1512 
1513 	/* Set internal operations for KCS. We use direct access to the LPC
1514 	 * map area */
1515 	astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
1516 	astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
1517 	astlpc->ops_data = astlpc;
1518 
1519 	rc = mctp_astlpc_init_fileio_lpc(astlpc);
1520 	if (rc) {
1521 		free(astlpc);
1522 		return NULL;
1523 	}
1524 
1525 	rc = mctp_astlpc_init_fileio_kcs(astlpc, kcs_path);
1526 	if (rc) {
1527 		free(astlpc);
1528 		return NULL;
1529 	}
1530 
1531 	return astlpc;
1532 }
1533 #else
1534 struct mctp_binding_astlpc *
1535 mctp_astlpc_init_fileio(const char *kcs_path __unused)
1536 {
1537 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1538 	return NULL;
1539 }
1540 
1541 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc __unused,
1542 			    struct pollfd *pollfd __unused)
1543 {
1544 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1545 	return -1;
1546 }
1547 #endif
1548