xref: /openbmc/libmctp/astlpc.c (revision b5f74a7c6fde1886101f2fd9a1385febc66306d7)
1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */
2 
3 #if HAVE_CONFIG_H
4 #include "config.h"
5 #endif
6 
7 #if HAVE_ENDIAN_H
8 #include <endian.h>
9 #endif
10 
11 #include <assert.h>
12 #include <errno.h>
13 #include <inttypes.h>
14 #include <stdbool.h>
15 #include <stdlib.h>
16 #include <string.h>
17 
18 #define pr_fmt(x) "astlpc: " x
19 
20 #include "container_of.h"
21 #include "crc32.h"
22 #include "libmctp.h"
23 #include "libmctp-alloc.h"
24 #include "libmctp-log.h"
25 #include "libmctp-astlpc.h"
26 #include "range.h"
27 
28 #ifdef MCTP_HAVE_FILEIO
29 
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include <poll.h>
33 #include <sys/ioctl.h>
34 #include <sys/mman.h>
35 #include <linux/aspeed-lpc-ctrl.h>
36 
37 /* kernel interface */
38 static const char *lpc_path = "/dev/aspeed-lpc-ctrl";
39 
40 #endif
41 
42 enum mctp_astlpc_cmd {
43 	cmd_initialise = 0x00,
44 	cmd_tx_begin = 0x01,
45 	cmd_rx_complete = 0x02,
46 	cmd_dummy_value = 0xff,
47 };
48 
49 enum mctp_astlpc_buffer_state {
50 	/*
51 	 * Prior to "Channel Ready" we mark the buffers as "idle" to catch illegal accesses. In this
52 	 * state neither side is considered the owner of the buffer.
53 	 *
54 	 * Upon "Channel Ready", each side transitions the buffers from the initial "idle" state
55 	 * to the following target states:
56 	 *
57 	 * Tx buffer: "acquired"
58 	 * Rx buffer: "released"
59 	 */
60 	buffer_state_idle,
61 
62 	/*
63 	 * Beyond initialisation by "Channel Ready", buffers are in the "acquired" state once:
64 	 *
65 	 * 1. We dequeue a control command transferring the buffer to our ownership out of the KCS
66 	 *    interface, and
67 	 * 2. We are yet to complete all of our required accesses to the buffer
68 	 *
69 	 * * The Tx buffer enters the "acquired" state when we dequeue the "Rx Complete" command
70 	 * * The Rx buffer enters the "acquired" state when we dequeue the "Tx Begin" command
71 	 *
72 	 * It is a failure of implementation if it's possible for both sides to simultaneously
73 	 * consider a buffer as "acquired".
74 	 */
75 	buffer_state_acquired,
76 
77 	/*
78 	 * Buffers are in the "prepared" state when:
79 	 *
80 	 * 1. We have completed all of our required accesses (read or write) for the buffer, and
81 	 * 2. We have not yet successfully enqueued the control command to hand off ownership
82 	 */
83 	buffer_state_prepared,
84 
85 	/*
86 	 * Beyond initialisation by "Channel Ready", buffers are in the "released" state once:
87 	 *
88 	 * 1. We successfully enqueue the control command transferring ownership to the remote
89 	 *    side into the KCS interface
90 	 *
91 	 * * The Tx buffer enters the "released" state when we enqueue the "Tx Begin" command
92 	 * * The Rx buffer enters the "released" state when we enqueue the "Rx Complete" command
93 	 *
94 	 * It may be the case that both sides simultaneously consider a buffer to be in the
95 	 * "released" state. However, if this is true, it must also be true that a buffer ownership
96 	 * transfer command has been enqueued in the KCS interface and is yet to be dequeued.
97 	 */
98 	buffer_state_released,
99 };
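/*
 * For illustration, the transitions described above reduce to the following
 * per-buffer life cycles (a summary of this file's behaviour, not additional
 * protocol requirements):
 *
 *   Tx: idle -> acquired ("Channel Ready") -> prepared (payload written)
 *          -> released ("Tx Begin" enqueued) -> acquired ("Rx Complete" dequeued) -> ...
 *   Rx: idle -> released ("Channel Ready") -> acquired ("Tx Begin" dequeued)
 *          -> prepared (payload read) -> released ("Rx Complete" enqueued) -> ...
 */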
100 
101 struct mctp_astlpc_buffer {
102 	uint32_t offset;
103 	uint32_t size;
104 	enum mctp_astlpc_buffer_state state;
105 };
106 
107 struct mctp_astlpc_layout {
108 	struct mctp_astlpc_buffer rx;
109 	struct mctp_astlpc_buffer tx;
110 };
111 
112 struct mctp_astlpc_protocol {
113 	uint16_t version;
114 	uint32_t (*packet_size)(uint32_t body);
115 	uint32_t (*body_size)(uint32_t packet);
116 	void (*pktbuf_protect)(struct mctp_pktbuf *pkt);
117 	bool (*pktbuf_validate)(struct mctp_pktbuf *pkt);
118 };
119 
120 struct mctp_binding_astlpc {
121 	struct mctp_binding binding;
122 
123 	void *lpc_map;
124 	struct mctp_astlpc_layout layout;
125 
126 	uint8_t mode;
127 	uint32_t requested_mtu;
128 
129 	const struct mctp_astlpc_protocol *proto;
130 
131 	/* direct ops data */
132 	struct mctp_binding_astlpc_ops ops;
133 	void *ops_data;
134 
135 	/* fileio ops data */
136 	int kcs_fd;
137 	uint8_t kcs_status;
138 };
139 
140 #define binding_to_astlpc(b)                                                   \
141 	container_of(b, struct mctp_binding_astlpc, binding)
142 
143 #define astlpc_prlog(ctx, lvl, fmt, ...)                                       \
144 	do {                                                                   \
145 		bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC);    \
146 		mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host",    \
147 			   ##__VA_ARGS__);                                     \
148 	} while (0)
149 
150 #define astlpc_prerr(ctx, fmt, ...)                                            \
151 	astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
152 #define astlpc_prwarn(ctx, fmt, ...)                                           \
153 	astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
154 #define astlpc_prnotice(ctx, fmt, ...)                                         \
155 	astlpc_prlog(ctx, MCTP_LOG_NOTICE, fmt, ##__VA_ARGS__)
156 #define astlpc_prinfo(ctx, fmt, ...)                                           \
157 	astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
158 #define astlpc_prdebug(ctx, fmt, ...)                                          \
159 	astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)
160 
161 /* clang-format off */
162 #define ASTLPC_MCTP_MAGIC	0x4d435450
163 #define ASTLPC_VER_BAD	0
164 #define ASTLPC_VER_MIN	1
165 
166 /* Support testing of new binding protocols */
167 #ifndef ASTLPC_VER_CUR
168 #define ASTLPC_VER_CUR	3
169 #endif
170 /* clang-format on */
171 
172 #ifndef ARRAY_SIZE
173 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
174 #endif
175 
176 static uint32_t astlpc_packet_size_v1(uint32_t body)
177 {
178 	assert((body + 4) > body);
179 
180 	return body + 4;
181 }
182 
183 static uint32_t astlpc_body_size_v1(uint32_t packet)
184 {
185 	assert((packet - 4) < packet);
186 
187 	return packet - 4;
188 }
189 
190 void astlpc_pktbuf_protect_v1(struct mctp_pktbuf *pkt)
191 {
192 	(void)pkt;
193 }
194 
195 bool astlpc_pktbuf_validate_v1(struct mctp_pktbuf *pkt)
196 {
197 	(void)pkt;
198 	return true;
199 }
200 
201 static uint32_t astlpc_packet_size_v3(uint32_t body)
202 {
203 	assert((body + 4 + 4) > body);
204 
205 	return body + 4 + 4;
206 }
207 
208 static uint32_t astlpc_body_size_v3(uint32_t packet)
209 {
210 	assert((packet - 4 - 4) < packet);
211 
212 	return packet - 4 - 4;
213 }
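/*
 * Worked example of the framing arithmetic above, assuming the MCTP baseline
 * transmission unit of 64 bytes and a 4-byte struct mctp_hdr: the MCTP packet
 * (body) is 64 + 4 = 68 bytes, a v1/v2 LPC frame adds the 4-byte length field
 * for 72 bytes, and a v3 frame adds a further 4-byte CRC32 for 76 bytes.
 */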
214 
215 void astlpc_pktbuf_protect_v3(struct mctp_pktbuf *pkt)
216 {
217 	uint32_t code;
218 
219 	code = htobe32(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt)));
220 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
221 	mctp_pktbuf_push(pkt, &code, 4);
222 }
223 
224 bool astlpc_pktbuf_validate_v3(struct mctp_pktbuf *pkt)
225 {
226 	uint32_t code;
227 	void *check;
228 
229 	code = be32toh(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt) - 4));
230 	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
231 	check = mctp_pktbuf_pop(pkt, 4);
232 	return check && !memcmp(&code, check, 4);
233 }
234 
235 static const struct mctp_astlpc_protocol astlpc_protocol_version[] = {
236 	[0] = {
237 		.version = 0,
238 		.packet_size = NULL,
239 		.body_size = NULL,
240 		.pktbuf_protect = NULL,
241 		.pktbuf_validate = NULL,
242 	},
243 	[1] = {
244 		.version = 1,
245 		.packet_size = astlpc_packet_size_v1,
246 		.body_size = astlpc_body_size_v1,
247 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
248 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
249 	},
250 	[2] = {
251 		.version = 2,
252 		.packet_size = astlpc_packet_size_v1,
253 		.body_size = astlpc_body_size_v1,
254 		.pktbuf_protect = astlpc_pktbuf_protect_v1,
255 		.pktbuf_validate = astlpc_pktbuf_validate_v1,
256 	},
257 	[3] = {
258 		.version = 3,
259 		.packet_size = astlpc_packet_size_v3,
260 		.body_size = astlpc_body_size_v3,
261 		.pktbuf_protect = astlpc_pktbuf_protect_v3,
262 		.pktbuf_validate = astlpc_pktbuf_validate_v3,
263 	},
264 };
265 
266 struct mctp_lpcmap_hdr {
267 	uint32_t magic;
268 
269 	uint16_t bmc_ver_min;
270 	uint16_t bmc_ver_cur;
271 	uint16_t host_ver_min;
272 	uint16_t host_ver_cur;
273 	uint16_t negotiated_ver;
274 	uint16_t pad0;
275 
276 	struct {
277 		uint32_t rx_offset;
278 		uint32_t rx_size;
279 		uint32_t tx_offset;
280 		uint32_t tx_size;
281 	} layout;
282 } __attribute__((packed));
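/*
 * As the structure is packed, the control-region fields land at fixed offsets
 * from the start of the window (a sketch for reference; the authoritative
 * layout is the struct above):
 *
 *   0x00 magic            0x10 layout.rx_offset
 *   0x04 bmc_ver_min      0x14 layout.rx_size
 *   0x06 bmc_ver_cur      0x18 layout.tx_offset
 *   0x08 host_ver_min     0x1c layout.tx_size
 *   0x0a host_ver_cur
 *   0x0c negotiated_ver
 *   0x0e pad0
 */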
283 
284 static const uint32_t control_size = 0x100;
285 
286 #define LPC_WIN_SIZE (1 * 1024 * 1024)
287 
288 #define KCS_STATUS_BMC_READY	  0x80
289 #define KCS_STATUS_CHANNEL_ACTIVE 0x40
290 #define KCS_STATUS_IBF		  0x02
291 #define KCS_STATUS_OBF		  0x01
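/*
 * Example status decode (for reference only): a value of 0xc0 has
 * KCS_STATUS_BMC_READY and KCS_STATUS_CHANNEL_ACTIVE set, i.e. the BMC has
 * completed negotiation and the channel is usable; 0x80 means the BMC is
 * ready but the channel is not yet (or no longer) active.
 */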
292 
293 static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
294 					enum mctp_binding_astlpc_kcs_reg reg,
295 					uint8_t val)
296 {
297 	return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
298 }
299 
300 static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
301 				       enum mctp_binding_astlpc_kcs_reg reg,
302 				       uint8_t *val)
303 {
304 	return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
305 }
306 
307 static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
308 					const void *buf, long offset,
309 					size_t len)
310 {
311 	astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);
312 
313 	assert(offset >= 0);
314 
315 	/* Indirect access */
316 	if (astlpc->ops.lpc_write) {
317 		void *data = astlpc->ops_data;
318 
319 		return astlpc->ops.lpc_write(data, buf, offset, len);
320 	}
321 
322 	/* Direct mapping */
323 	assert(astlpc->lpc_map);
324 	memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);
325 
326 	return 0;
327 }
328 
329 static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
330 				       void *buf, long offset, size_t len)
331 {
332 	astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
333 		       offset);
334 
335 	assert(offset >= 0);
336 
337 	/* Indirect access */
338 	if (astlpc->ops.lpc_read) {
339 		void *data = astlpc->ops_data;
340 
341 		return astlpc->ops.lpc_read(data, buf, offset, len);
342 	}
343 
344 	/* Direct mapping */
345 	assert(astlpc->lpc_map);
346 	memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);
347 
348 	return 0;
349 }
350 
351 static void
352 mctp_astlpc_kcs_print_status_write(struct mctp_binding_astlpc *astlpc,
353 				   uint8_t status)
354 {
355 	astlpc_prnotice(
356 		astlpc, "Binding state is 0x%hhx: BMC %s, Channel %s, OBF %s",
357 		status, status & KCS_STATUS_BMC_READY ? "active" : "inactive",
358 		status & KCS_STATUS_CHANNEL_ACTIVE ? "active" : "inactive",
359 		status & KCS_STATUS_OBF ? "preserved" : "cleared");
360 }
361 
362 static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
363 				      uint8_t status)
364 {
365 	uint8_t data;
366 	int rc;
367 
368 	/* Since we're setting the status register, we want the other endpoint
369 	 * to be interrupted. However, some hardware may only raise a host-side
370 	 * interrupt on an ODR event.
371 	 * So, write a dummy value of 0xff to ODR, which will ensure that an
372 	 * interrupt is triggered, and can be ignored by the host.
373 	 */
374 	data = cmd_dummy_value;
375 
376 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
377 	if (rc) {
378 		astlpc_prwarn(astlpc, "KCS status write failed");
379 		return -1;
380 	}
381 
382 	mctp_astlpc_kcs_print_status_write(astlpc, status);
383 
384 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
385 	if (rc) {
386 		astlpc_prwarn(astlpc, "KCS dummy data write failed");
387 		return -1;
388 	}
389 
390 	return 0;
391 }
392 
393 static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
394 				   struct mctp_astlpc_layout *layout)
395 {
396 	struct mctp_lpcmap_hdr hdr;
397 	int rc;
398 
399 	rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
400 	if (rc < 0)
401 		return rc;
402 
403 	/* Flip the buffers as the names are defined in terms of the host */
404 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
405 		layout->rx.offset = be32toh(hdr.layout.tx_offset);
406 		layout->rx.size = be32toh(hdr.layout.tx_size);
407 		layout->tx.offset = be32toh(hdr.layout.rx_offset);
408 		layout->tx.size = be32toh(hdr.layout.rx_size);
409 	} else {
410 		assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
411 
412 		layout->rx.offset = be32toh(hdr.layout.rx_offset);
413 		layout->rx.size = be32toh(hdr.layout.rx_size);
414 		layout->tx.offset = be32toh(hdr.layout.tx_offset);
415 		layout->tx.size = be32toh(hdr.layout.tx_size);
416 	}
417 
418 	return 0;
419 }
420 
421 static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
422 				    struct mctp_astlpc_layout *layout)
423 {
424 	uint32_t rx_size_be;
425 
426 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
427 		struct mctp_lpcmap_hdr hdr;
428 
429 		/*
430 		 * Flip the buffers as the names are defined in terms of the
431 		 * host
432 		 */
433 		hdr.layout.rx_offset = htobe32(layout->tx.offset);
434 		hdr.layout.rx_size = htobe32(layout->tx.size);
435 		hdr.layout.tx_offset = htobe32(layout->rx.offset);
436 		hdr.layout.tx_size = htobe32(layout->rx.size);
437 
438 		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
439 					     offsetof(struct mctp_lpcmap_hdr,
440 						      layout),
441 					     sizeof(hdr.layout));
442 	}
443 
444 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
445 
446 	/*
447 	 * As of v2 we only need to write rx_size - the offsets are controlled
448 	 * by the BMC, as is the BMC's rx_size (host tx_size).
449 	 */
450 	rx_size_be = htobe32(layout->rx.size);
451 	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
452 				     offsetof(struct mctp_lpcmap_hdr,
453 					      layout.rx_size),
454 				     sizeof(rx_size_be));
455 }
456 
457 static bool
458 mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc *astlpc,
459 			    const struct mctp_astlpc_buffer *buf,
460 			    const char *name)
461 {
462 	/* Check for overflow */
463 	if (buf->offset + buf->size < buf->offset) {
464 		mctp_prerr(
465 			"%s packet buffer parameters overflow: offset: 0x%" PRIx32
466 			", size: %" PRIu32,
467 			name, buf->offset, buf->size);
468 		return false;
469 	}
470 
471 	/* Check that the buffers are contained within the allocated space */
472 	if (buf->offset + buf->size > LPC_WIN_SIZE) {
473 		mctp_prerr(
474 			"%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
475 			", size: %" PRIu32,
476 			name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
477 			buf->size);
478 		return false;
479 	}
480 
481 	/* Check that the baseline transmission unit is supported */
482 	if (buf->size <
483 	    astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU))) {
484 		mctp_prerr(
485 			"%s packet buffer too small: Require %" PRIu32
486 			" bytes to support the %u byte baseline transmission unit, found %" PRIu32,
487 			name,
488 			astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU)),
489 			MCTP_BTU, buf->size);
490 		return false;
491 	}
492 
493 	/* Check for overlap with the control space */
494 	if (buf->offset < control_size) {
495 		mctp_prerr(
496 			"%s packet buffer overlaps control region {0x%" PRIx32
497 			", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
498 			name, 0U, control_size, buf->offset, buf->size);
499 		return false;
500 	}
501 
502 	return true;
503 }
504 
505 static bool
506 mctp_astlpc_layout_validate(const struct mctp_binding_astlpc *astlpc,
507 			    const struct mctp_astlpc_layout *layout)
508 {
509 	const struct mctp_astlpc_buffer *rx = &layout->rx;
510 	const struct mctp_astlpc_buffer *tx = &layout->tx;
511 	bool rx_valid, tx_valid;
512 
513 	rx_valid = mctp_astlpc_buffer_validate(astlpc, rx, "Rx");
514 	tx_valid = mctp_astlpc_buffer_validate(astlpc, tx, "Tx");
515 
516 	if (!(rx_valid && tx_valid))
517 		return false;
518 
519 	/* Check that the buffers are disjoint */
520 	if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
521 	    (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
522 		mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
523 			   ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
524 			   rx->offset, rx->size, tx->offset, tx->size);
525 		return false;
526 	}
527 
528 	return true;
529 }
530 
531 static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
532 {
533 	struct mctp_lpcmap_hdr hdr = { 0 };
534 	uint8_t status;
535 	uint32_t sz;
536 
537 	/*
538 	 * The largest buffer size is half of the allocated MCTP space
539 	 * excluding the control space.
540 	 */
541 	sz = ((LPC_WIN_SIZE - control_size) / 2);
542 
543 	/*
544 	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
545 	 * Query Hop in DSP0236 v1.3.0.
546 	 */
547 	sz = MCTP_BODY_SIZE(astlpc->proto->body_size(sz));
548 	sz &= ~0xfUL;
549 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(sz));
550 
551 	if (astlpc->requested_mtu) {
552 		uint32_t rpkt, rmtu;
553 
554 		rmtu = astlpc->requested_mtu;
555 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
556 		sz = MIN(sz, rpkt);
557 	}
558 
559 	/* Flip the buffers as the names are defined in terms of the host */
560 	astlpc->layout.tx.offset = control_size;
561 	astlpc->layout.tx.size = sz;
562 	astlpc->layout.rx.offset =
563 		astlpc->layout.tx.offset + astlpc->layout.tx.size;
564 	astlpc->layout.rx.size = sz;
565 
566 	if (!mctp_astlpc_layout_validate(astlpc, &astlpc->layout)) {
567 		astlpc_prerr(astlpc, "Cannot support an MTU of %" PRIu32, sz);
568 		return -EINVAL;
569 	}
570 
571 	hdr = (struct mctp_lpcmap_hdr){
572 		.magic = htobe32(ASTLPC_MCTP_MAGIC),
573 		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
574 		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),
575 
576 		/* Flip the buffers back as we're now describing the host's
577 		 * configuration to the host */
578 		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
579 		.layout.rx_size = htobe32(astlpc->layout.tx.size),
580 		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
581 		.layout.tx_size = htobe32(astlpc->layout.rx.size),
582 	};
583 
584 	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));
585 
586 	/*
587 	 * Set status indicating that the BMC is now active. Be explicit about
588 	 * clearing OBF; we're reinitialising the binding and so any previous
589 	 * buffer state is irrelevant.
590 	 */
591 	status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF;
592 	return mctp_astlpc_kcs_set_status(astlpc, status);
593 }
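/*
 * Worked example of the sizing above with the 1 MiB window, the 0x100-byte
 * control region, ASTLPC_VER_CUR == 3 and no requested_mtu (and assuming a
 * 4-byte struct mctp_hdr): sz starts at (1048576 - 256) / 2 = 524160, becomes
 * an MTU of 524144 after the header/CRC deductions and the 16-byte trim, and
 * is re-expanded to a 524156-byte packet buffer. The Tx buffer then sits at
 * offset 0x100 and the Rx buffer immediately after it, comfortably inside the
 * window.
 */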
594 
595 static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
596 {
597 	struct mctp_binding_astlpc *astlpc =
598 		container_of(b, struct mctp_binding_astlpc, binding);
599 
600 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_CUR];
601 
602 	return mctp_astlpc_init_bmc(astlpc);
603 }
604 
605 static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
606 					 uint16_t bmc_ver_cur,
607 					 uint16_t host_ver_min,
608 					 uint16_t host_ver_cur)
609 {
610 	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
611 		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
612 			   "], [%" PRIu16 ", %" PRIu16 "]",
613 			   bmc_ver_min, bmc_ver_cur, host_ver_min,
614 			   host_ver_cur);
615 		return false;
616 	} else if (bmc_ver_min > bmc_ver_cur) {
617 		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
618 			   "]",
619 			   bmc_ver_min, bmc_ver_cur);
620 		return false;
621 	} else if (host_ver_min > host_ver_cur) {
622 		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
623 			   "]",
624 			   host_ver_min, host_ver_cur);
625 		return false;
626 	} else if ((host_ver_cur < bmc_ver_min) ||
627 		   (host_ver_min > bmc_ver_cur)) {
628 		mctp_prerr(
629 			"Unable to satisfy version negotiation with ranges [%" PRIu16
630 			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
631 			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
632 		return false;
633 	}
634 
635 	return true;
636 }
637 
638 static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
639 {
640 	struct mctp_astlpc_layout layout;
641 	uint32_t rmtu;
642 	uint32_t sz;
643 	int rc;
644 
645 	rc = mctp_astlpc_layout_read(astlpc, &layout);
646 	if (rc < 0)
647 		return rc;
648 
649 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
650 		astlpc_prerr(
651 			astlpc,
652 			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
653 			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
654 			layout.rx.offset, layout.rx.size, layout.tx.offset,
655 			layout.tx.size);
656 		return -EINVAL;
657 	}
658 
659 	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
660 		      astlpc->requested_mtu);
661 
662 	rmtu = astlpc->requested_mtu;
663 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
664 	layout.rx.size = sz;
665 
666 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
667 		astlpc_prerr(
668 			astlpc,
669 			"Generated invalid buffer layout with size %" PRIu32
670 			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
671 			", %" PRIu32 "}",
672 			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
673 			layout.tx.size);
674 		return -EINVAL;
675 	}
676 
677 	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
678 		      astlpc->requested_mtu);
679 
680 	return mctp_astlpc_layout_write(astlpc, &layout);
681 }
682 
683 static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
684 					      uint16_t bmc_ver_cur,
685 					      uint16_t host_ver_min,
686 					      uint16_t host_ver_cur)
687 {
688 	if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
689 					  host_ver_min, host_ver_cur))
690 		return ASTLPC_VER_BAD;
691 
692 	if (bmc_ver_cur < host_ver_cur)
693 		return bmc_ver_cur;
694 
695 	return host_ver_cur;
696 }
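/*
 * For example, a BMC advertising [min 1, cur 3] and a host advertising
 * [min 1, cur 2] negotiate version 2; if the ranges do not intersect (say
 * [3, 3] against [1, 2]) the result is ASTLPC_VER_BAD and channel
 * initialisation fails.
 */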
697 
698 static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
699 {
700 	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
701 	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
702 	uint16_t bmc_ver_min, bmc_ver_cur, negotiated;
703 	struct mctp_lpcmap_hdr hdr;
704 	uint8_t status;
705 	int rc;
706 
707 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
708 	if (rc) {
709 		mctp_prwarn("KCS status read failed");
710 		return rc;
711 	}
712 
713 	astlpc->kcs_status = status;
714 
715 	if (!(status & KCS_STATUS_BMC_READY))
716 		return -EHOSTDOWN;
717 
718 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
719 
720 	bmc_ver_min = be16toh(hdr.bmc_ver_min);
721 	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);
722 
723 	/* Calculate the expected value of negotiated_ver */
724 	negotiated = mctp_astlpc_negotiate_version(
725 		bmc_ver_min, bmc_ver_cur, ASTLPC_VER_MIN, ASTLPC_VER_CUR);
726 	if (!negotiated) {
727 		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
728 		return -EINVAL;
729 	}
730 
731 	/* Assign protocol ops so we can calculate the packet buffer sizes */
732 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
733 	astlpc->proto = &astlpc_protocol_version[negotiated];
734 
735 	/* Negotiate packet buffers in v2 style if the BMC supports it */
736 	if (negotiated >= 2) {
737 		rc = mctp_astlpc_negotiate_layout_host(astlpc);
738 		if (rc < 0)
739 			return rc;
740 	}
741 
742 	/* Advertise the host's supported protocol versions */
743 	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
744 			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
745 			      sizeof(ver_min_be));
746 
747 	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
748 			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
749 			      sizeof(ver_cur_be));
750 
751 	/* Send channel init command */
752 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
753 	if (rc) {
754 		astlpc_prwarn(astlpc, "KCS write failed");
755 	}
756 
757 	/*
758 	 * Configure the host so `astlpc->proto->version == 0` holds until we
759 	 * receive a subsequent status update from the BMC. Until then,
760 	 * `astlpc->proto->version == 0` indicates that we're yet to complete
761 	 * the channel initialisation handshake.
762 	 *
763 	 * When the BMC provides a status update with KCS_STATUS_CHANNEL_ACTIVE
764 	 * set we will assign the appropriate protocol ops struct in accordance
765 	 * with `negotiated_ver`.
766 	 */
767 	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_BAD];
768 
769 	return rc;
770 }
771 
772 static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
773 {
774 	struct mctp_binding_astlpc *astlpc =
775 		container_of(b, struct mctp_binding_astlpc, binding);
776 
777 	return mctp_astlpc_init_host(astlpc);
778 }
779 
780 static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
781 				    uint8_t status, bool is_write)
782 {
783 	bool is_bmc;
784 	bool ready_state;
785 	uint8_t flag;
786 
787 	is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
788 	flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
789 	ready_state = is_write ? 0 : 1;
790 
791 	return !!(status & flag) == ready_state;
792 }
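/*
 * The flag selection above reduces to the following table (derived from the
 * expression, shown here for convenience):
 *
 *   BMC  write: ready when OBF is clear (host has consumed the previous ODR)
 *   BMC  read:  ready when IBF is set   (host has written IDR)
 *   Host write: ready when IBF is clear (BMC has consumed the previous IDR)
 *   Host read:  ready when OBF is set   (BMC has written ODR)
 */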
793 
794 static inline bool
795 mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
796 {
797 	return __mctp_astlpc_kcs_ready(astlpc, status, false);
798 }
799 
800 static inline bool
801 mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
802 {
803 	return __mctp_astlpc_kcs_ready(astlpc, status, true);
804 }
805 
806 static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
807 				enum mctp_astlpc_cmd data)
808 {
809 	uint8_t status;
810 	int rc;
811 
812 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
813 	if (rc) {
814 		astlpc_prwarn(astlpc, "KCS status read failed");
815 		return -EIO;
816 	}
817 	if (!mctp_astlpc_kcs_write_ready(astlpc, status))
818 		return -EBUSY;
819 
820 	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
821 	if (rc) {
822 		astlpc_prwarn(astlpc, "KCS data write failed");
823 		return -EIO;
824 	}
825 
826 	return 0;
827 }
828 
829 static int mctp_binding_astlpc_tx(struct mctp_binding *b,
830 				  struct mctp_pktbuf *pkt)
831 {
832 	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
833 	uint32_t len, len_be;
834 	struct mctp_hdr *hdr;
835 	int rc;
836 
837 	hdr = mctp_pktbuf_hdr(pkt);
838 	len = mctp_pktbuf_size(pkt);
839 
840 	astlpc_prdebug(astlpc,
841 		       "%s: Transmitting %" PRIu32
842 		       "-byte packet (%hhu, %hhu, 0x%hhx)",
843 		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);
844 
845 	if (len > astlpc->proto->body_size(astlpc->layout.tx.size)) {
846 		astlpc_prwarn(astlpc, "invalid TX len %" PRIu32 ": %" PRIu32,
847 			      len,
848 			      astlpc->proto->body_size(astlpc->layout.tx.size));
849 		return -EMSGSIZE;
850 	}
851 
852 	mctp_binding_set_tx_enabled(b, false);
853 
854 	len_be = htobe32(len);
855 	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
856 			      sizeof(len_be));
857 
858 	astlpc->proto->pktbuf_protect(pkt);
859 	len = mctp_pktbuf_size(pkt);
860 
861 	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);
862 
863 	astlpc->layout.tx.state = buffer_state_prepared;
864 
865 	rc = mctp_astlpc_kcs_send(astlpc, cmd_tx_begin);
866 	if (!rc)
867 		astlpc->layout.tx.state = buffer_state_released;
868 
869 	return rc == -EBUSY ? 0 : rc;
870 }
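/*
 * The resulting layout of the Tx window written above, for reference: a
 * 4-byte big-endian length at layout.tx.offset, followed by the MCTP packet
 * (and, for v3, the CRC32 appended by pktbuf_protect) at layout.tx.offset + 4.
 */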
871 
872 /* Update binding pkt_size and reallocate tx_storage */
873 static int mctp_astlpc_set_pkt_size(struct mctp_binding_astlpc *astlpc,
874 				    size_t pkt_size)
875 {
876 	size_t body = MCTP_BODY_SIZE(pkt_size);
877 	body += astlpc->binding.pkt_header + astlpc->binding.pkt_trailer;
878 	size_t pktbuf_size = MCTP_PKTBUF_SIZE(body);
879 	/* Reallocate TX storage */
880 	if (astlpc->binding.tx_storage) {
881 		__mctp_free(astlpc->binding.tx_storage);
882 	}
883 	astlpc->binding.tx_storage = __mctp_alloc(pktbuf_size);
884 	if (!astlpc->binding.tx_storage) {
885 		return -ENOMEM;
886 	}
887 
888 	astlpc->binding.pkt_size = pkt_size;
889 	return 0;
890 }
891 
892 static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
893 					  struct mctp_astlpc_layout *layout)
894 {
895 	uint32_t low, high, limit, rpkt;
896 
897 	/* Derive the largest MTU the BMC _can_ support */
898 	low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
899 	high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
900 	limit = high - low;
901 
902 	/* Determine the largest MTU the BMC _wants_ to support */
903 	if (astlpc->requested_mtu) {
904 		uint32_t rmtu = astlpc->requested_mtu;
905 
906 		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
907 		limit = MIN(limit, rpkt);
908 	}
909 
910 	/* Determine the accepted MTU, applied both directions by convention */
911 	rpkt = MIN(limit, layout->tx.size);
912 	return MCTP_BODY_SIZE(astlpc->proto->body_size(rpkt));
913 }
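/*
 * As an illustration (figures assume the default BMC layout sketched in
 * mctp_astlpc_init_bmc and a v3 protocol): if the host writes an rx_size of
 * 16396 bytes, that value arrives here as layout->tx.size after the read-side
 * flip, rpkt becomes MIN(524156, 16396) = 16396, and the negotiated MTU works
 * out to 16396 - 8 - 4 = 16384 bytes.
 */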
914 
915 static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
916 {
917 	struct mctp_astlpc_layout proposed, pending;
918 	uint32_t sz, mtu;
919 	int rc;
920 
921 	/* Do we have a valid protocol version? */
922 	if (!astlpc->proto->version)
923 		return -EINVAL;
924 
925 	/* Extract the host's proposed layout */
926 	rc = mctp_astlpc_layout_read(astlpc, &proposed);
927 	if (rc < 0)
928 		return rc;
929 
930 	/* Do we have a reasonable layout? */
931 	if (!mctp_astlpc_layout_validate(astlpc, &proposed))
932 		return -EINVAL;
933 
934 	/* Negotiate the MTU */
935 	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
936 	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(mtu));
937 
938 	/*
939 	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
940 	 * functions
941 	 */
942 	pending = astlpc->layout;
943 	pending.tx.size = sz;
944 	pending.rx.size = sz;
945 
946 	if (mctp_astlpc_layout_validate(astlpc, &pending)) {
947 		/* We found a sensible Rx MTU, so honour it */
948 		astlpc->layout = pending;
949 
950 		/* Enforce the negotiated MTU */
951 		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
952 		if (rc < 0)
953 			return rc;
954 
955 		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
956 			      mtu);
957 	} else {
958 		astlpc_prwarn(astlpc, "MTU negotiation failed");
959 		return -EINVAL;
960 	}
961 
962 	if (astlpc->proto->version >= 2) {
963 		rc = mctp_astlpc_set_pkt_size(astlpc, MCTP_PACKET_SIZE(mtu));
964 		if (rc) {
965 			astlpc_prwarn(astlpc, "Allocation error");
966 			return rc;
967 		}
968 	}
969 
970 	return 0;
971 }
972 
973 static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
974 {
975 	uint16_t negotiated, negotiated_be;
976 	struct mctp_lpcmap_hdr hdr;
977 	uint8_t status;
978 	int rc;
979 
980 	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
981 
982 	/* Version negotiation */
983 	negotiated = mctp_astlpc_negotiate_version(ASTLPC_VER_MIN,
984 						   ASTLPC_VER_CUR,
985 						   be16toh(hdr.host_ver_min),
986 						   be16toh(hdr.host_ver_cur));
987 
988 	/* MTU negotiation requires knowing which protocol we'll use */
989 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
990 	astlpc->proto = &astlpc_protocol_version[negotiated];
991 
992 	/* Host Rx MTU negotiation: Failure terminates channel init */
993 	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
994 	if (rc < 0)
995 		negotiated = ASTLPC_VER_BAD;
996 
997 	/* Populate the negotiated version */
998 	negotiated_be = htobe16(negotiated);
999 	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
1000 			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
1001 			      sizeof(negotiated_be));
1002 
1003 	/* Track buffer ownership */
1004 	astlpc->layout.tx.state = buffer_state_acquired;
1005 	astlpc->layout.rx.state = buffer_state_released;
1006 
1007 	/* Finalise the configuration */
1008 	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
1009 	if (negotiated > 0) {
1010 		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
1011 			      negotiated);
1012 		status |= KCS_STATUS_CHANNEL_ACTIVE;
1013 	} else {
1014 		astlpc_prerr(astlpc, "Failed to initialise channel");
1015 	}
1016 
1017 	mctp_astlpc_kcs_set_status(astlpc, status);
1018 
1019 	mctp_binding_set_tx_enabled(&astlpc->binding,
1020 				    status & KCS_STATUS_CHANNEL_ACTIVE);
1021 }
1022 
1023 static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
1024 {
1025 	struct mctp_pktbuf *pkt;
1026 	struct mctp_hdr *hdr;
1027 	uint32_t body, packet;
1028 
1029 	mctp_astlpc_lpc_read(astlpc, &body, astlpc->layout.rx.offset,
1030 			     sizeof(body));
1031 	body = be32toh(body);
1032 
1033 	if (body > astlpc->proto->body_size(astlpc->layout.rx.size)) {
1034 		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
1035 		return;
1036 	}
1037 
1038 	if ((size_t)body > astlpc->binding.pkt_size) {
1039 		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
1040 		return;
1041 	}
1042 
1043 	/* Eliminate the medium-specific header that we just read */
1044 	packet = astlpc->proto->packet_size(body) - 4;
1045 	pkt = mctp_pktbuf_alloc(&astlpc->binding, packet);
1046 	if (!pkt) {
1047 		astlpc_prwarn(astlpc, "unable to allocate pktbuf len 0x%x",
1048 			      packet);
1049 		return;
1050 	}
1051 
1052 	/*
1053 	 * Read payload and medium-specific trailer from immediately after the
1054 	 * medium-specific header.
1055 	 */
1056 	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
1057 			     astlpc->layout.rx.offset + 4, packet);
1058 
1059 	astlpc->layout.rx.state = buffer_state_prepared;
1060 
1061 	/* Inform the other side of the MCTP interface that we have read
1062 	 * the packet off the bus before handling the contents of the packet.
1063 	 */
1064 	if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
1065 		astlpc->layout.rx.state = buffer_state_released;
1066 
1067 	hdr = mctp_pktbuf_hdr(pkt);
1068 	if (hdr->ver != 1) {
1069 		mctp_pktbuf_free(pkt);
1070 		astlpc_prdebug(astlpc, "Dropped packet with invalid version");
1071 		return;
1072 	}
1073 
1074 	/*
1075 	 * v3 will validate the CRC32 in the medium-specific trailer and adjust
1076 	 * the packet size accordingly. On older protocols validation is a no-op
1077 	 * that always returns true.
1078 	 */
1079 	if (astlpc->proto->pktbuf_validate(pkt)) {
1080 		mctp_bus_rx(&astlpc->binding, pkt);
1081 	} else {
1082 		/* TODO: Drop any associated assembly */
1083 		astlpc_prdebug(astlpc, "Dropped corrupt packet");
1084 	}
1085 	mctp_pktbuf_free(pkt);
1086 }
1087 
1088 static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
1089 {
1090 	astlpc->layout.tx.state = buffer_state_acquired;
1091 	mctp_binding_set_tx_enabled(&astlpc->binding, true);
1092 }
1093 
1094 static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
1095 {
1096 	struct mctp_astlpc_layout layout;
1097 	uint16_t negotiated;
1098 	int rc;
1099 
1100 	rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
1101 				  offsetof(struct mctp_lpcmap_hdr,
1102 					   negotiated_ver),
1103 				  sizeof(negotiated));
1104 	if (rc < 0)
1105 		return rc;
1106 
1107 	negotiated = be16toh(negotiated);
1108 	astlpc_prerr(astlpc, "Version negotiation got: %u", negotiated);
1109 
1110 	if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
1111 	    negotiated > ASTLPC_VER_CUR) {
1112 		astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
1113 			     negotiated);
1114 		return -EINVAL;
1115 	}
1116 
1117 	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
1118 	astlpc->proto = &astlpc_protocol_version[negotiated];
1119 
1120 	rc = mctp_astlpc_layout_read(astlpc, &layout);
1121 	if (rc < 0)
1122 		return rc;
1123 
1124 	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
1125 		mctp_prerr("BMC proposed invalid buffer parameters");
1126 		return -EINVAL;
1127 	}
1128 
1129 	astlpc->layout = layout;
1130 
1131 	if (negotiated >= 2)
1132 		astlpc->binding.pkt_size =
1133 			astlpc->proto->body_size(astlpc->layout.tx.size);
1134 
1135 	/* Track buffer ownership */
1136 	astlpc->layout.tx.state = buffer_state_acquired;
1137 	astlpc->layout.rx.state = buffer_state_released;
1138 
1139 	return 0;
1140 }
1141 
1142 static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
1143 				      uint8_t status)
1144 {
1145 	uint8_t updated;
1146 	int rc = 0;
1147 
1148 	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
1149 
1150 	updated = astlpc->kcs_status ^ status;
1151 
1152 	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
1153 		       status, updated);
1154 
1155 	if (updated & KCS_STATUS_BMC_READY) {
1156 		if (status & KCS_STATUS_BMC_READY) {
1157 			astlpc->kcs_status = status;
1158 			return astlpc->binding.start(&astlpc->binding);
1159 		} else {
1160 			/* Shut down the channel */
1161 			astlpc->layout.rx.state = buffer_state_idle;
1162 			astlpc->layout.tx.state = buffer_state_idle;
1163 			mctp_binding_set_tx_enabled(&astlpc->binding, false);
1164 		}
1165 	}
1166 
1167 	if (astlpc->proto->version == 0 ||
1168 	    updated & KCS_STATUS_CHANNEL_ACTIVE) {
1169 		bool enable;
1170 
1171 		astlpc->layout.rx.state = buffer_state_idle;
1172 		astlpc->layout.tx.state = buffer_state_idle;
1173 		rc = mctp_astlpc_finalise_channel(astlpc);
1174 		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;
1175 		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
1176 	}
1177 
1178 	astlpc->kcs_status = status;
1179 
1180 	return rc;
1181 }
1182 
1183 bool mctp_astlpc_tx_done(struct mctp_binding_astlpc *astlpc)
1184 {
1185 	return astlpc->layout.tx.state == buffer_state_acquired;
1186 }
1187 
1188 int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
1189 {
1190 	uint8_t status, data;
1191 	int rc;
1192 
1193 	if (astlpc->layout.rx.state == buffer_state_prepared)
1194 		if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
1195 			astlpc->layout.rx.state = buffer_state_released;
1196 
1197 	if (astlpc->layout.tx.state == buffer_state_prepared)
1198 		if (!mctp_astlpc_kcs_send(astlpc, cmd_tx_begin))
1199 			astlpc->layout.tx.state = buffer_state_released;
1200 
1201 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
1202 	if (rc) {
1203 		astlpc_prwarn(astlpc, "KCS read error");
1204 		return -1;
1205 	}
1206 
1207 	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);
1208 
1209 	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
1210 		return 0;
1211 
1212 	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
1213 	if (rc) {
1214 		astlpc_prwarn(astlpc, "KCS data read error");
1215 		return -1;
1216 	}
1217 
1218 	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);
1219 
1220 	if (!astlpc->proto->version &&
1221 	    !(data == cmd_initialise || data == cmd_dummy_value)) {
1222 		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
1223 			      data);
1224 		return 0;
1225 	}
1226 
1227 	switch (data) {
1228 	case cmd_initialise:
1229 		mctp_astlpc_init_channel(astlpc);
1230 		break;
1231 	case cmd_tx_begin:
1232 		if (astlpc->layout.rx.state != buffer_state_released) {
1233 			astlpc_prerr(
1234 				astlpc,
1235 				"Protocol error: Invalid Rx buffer state for event %d: %d\n",
1236 				data, astlpc->layout.rx.state);
1237 			return 0;
1238 		}
1239 		mctp_astlpc_rx_start(astlpc);
1240 		break;
1241 	case cmd_rx_complete:
1242 		if (astlpc->layout.tx.state != buffer_state_released) {
1243 			astlpc_prerr(
1244 				astlpc,
1245 				"Protocol error: Invalid Tx buffer state for event %d: %d\n",
1246 				data, astlpc->layout.tx.state);
1247 			return 0;
1248 		}
1249 		mctp_astlpc_tx_complete(astlpc);
1250 		break;
1251 	case cmd_dummy_value:
1252 		/* No responsibilities for the BMC on 0xff */
1253 		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1254 			rc = mctp_astlpc_update_channel(astlpc, status);
1255 			if (rc < 0)
1256 				return rc;
1257 		}
1258 		break;
1259 	default:
1260 		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
1261 	}
1262 
1263 	/* Handle silent loss of bmc-ready */
1264 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
1265 		if (!(status & KCS_STATUS_BMC_READY && data == cmd_dummy_value))
1266 			return mctp_astlpc_update_channel(astlpc, status);
1267 	}
1268 
1269 	return rc;
1270 }
1271 
1272 /* allocate and basic initialisation */
1273 static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
1274 						      uint32_t mtu)
1275 {
1276 	struct mctp_binding_astlpc *astlpc;
1277 
1278 	assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
1279 	       (mode == MCTP_BINDING_ASTLPC_MODE_HOST));
1280 
1281 	astlpc = __mctp_alloc(sizeof(*astlpc));
1282 	if (!astlpc)
1283 		return NULL;
1284 
1285 	memset(astlpc, 0, sizeof(*astlpc));
1286 	astlpc->mode = mode;
1287 	astlpc->lpc_map = NULL;
1288 	astlpc->layout.rx.state = buffer_state_idle;
1289 	astlpc->layout.tx.state = buffer_state_idle;
1290 	astlpc->requested_mtu = mtu;
1291 	astlpc->binding.name = "astlpc";
1292 	astlpc->binding.version = 1;
1293 	astlpc->binding.pkt_header = 4;
1294 	astlpc->binding.pkt_trailer = 4;
1295 	astlpc->binding.tx = mctp_binding_astlpc_tx;
1296 	if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1297 		astlpc->binding.start = mctp_binding_astlpc_start_bmc;
1298 	else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
1299 		astlpc->binding.start = mctp_binding_astlpc_start_host;
1300 	else {
1301 		astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode);
1302 		__mctp_free(astlpc);
1303 		return NULL;
1304 	}
1305 
1306 	if (mctp_astlpc_set_pkt_size(
1307 		    astlpc,
1308 		    MCTP_PACKET_SIZE(mtu > MCTP_BTU ? mtu : MCTP_BTU)) != 0) {
1309 		astlpc_prerr(astlpc, "%s: Allocation error", __func__);
1310 		__mctp_free(astlpc);
1311 		return NULL;
1312 	}
1313 
1314 	return astlpc;
1315 }
1316 
1317 struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
1318 {
1319 	return &b->binding;
1320 }
1321 
1322 struct mctp_binding_astlpc *
1323 mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
1324 		 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
1325 {
1326 	struct mctp_binding_astlpc *astlpc;
1327 
1328 	if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
1329 	      mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
1330 		mctp_prerr("Unknown binding mode: %u", mode);
1331 		return NULL;
1332 	}
1333 
1334 	astlpc = __mctp_astlpc_init(mode, mtu);
1335 	if (!astlpc)
1336 		return NULL;
1337 
1338 	memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
1339 	astlpc->ops_data = ops_data;
1340 	astlpc->lpc_map = lpc_map;
1341 	astlpc->mode = mode;
1342 
1343 	return astlpc;
1344 }
1345 
1346 struct mctp_binding_astlpc *
1347 mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
1348 		     void *lpc_map)
1349 {
1350 	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
1351 				ops, ops_data);
1352 }
1353 
1354 void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
1355 {
1356 	/* Clear channel-active and bmc-ready */
1357 	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1358 		mctp_astlpc_kcs_set_status(astlpc, 0);
1359 	__mctp_free(astlpc->binding.tx_storage);
1360 	__mctp_free(astlpc);
1361 }
1362 
1363 #ifdef MCTP_HAVE_FILEIO
1364 
1365 static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
1366 {
1367 	struct aspeed_lpc_ctrl_mapping map = {
1368 		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
1369 		.window_id = 0, /* There's only one */
1370 		.flags = 0,
1371 		.addr = 0,
1372 		.offset = 0,
1373 		.size = 0
1374 	};
1375 	void *lpc_map_base;
1376 	int fd, rc;
1377 
1378 	fd = open(lpc_path, O_RDWR | O_SYNC);
1379 	if (fd < 0) {
1380 		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
1381 		return -1;
1382 	}
1383 
1384 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
1385 	if (rc) {
1386 		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
1387 		close(fd);
1388 		return -1;
1389 	}
1390 
1391 	/*
1392 	 *
1393 	 *
1394 	 * Decouple ourselves from hiomapd[1] (another user of the FW2AHB) by
1395 	 * mapping the FW2AHB to the reserved memory here as well.
1396 	 *
1397 	 * It's not possible to use the MCTP ASTLPC binding on machines that
1398 	 * need the FW2AHB bridge mapped anywhere except to the reserved memory
1399 	 * (e.g. the host SPI NOR).
1400 	 *
1401 	 * [1] https://github.com/openbmc/hiomapd/
1402 	 *
1403 	 *
1404 	 *
1405 	 * The following calculation must align with what's going on in
1406 	 * hiomapd's lpc.c so as not to disrupt its behaviour:
1407 	 *
1408 	 * https://github.com/openbmc/hiomapd/blob/5ff50e3cbd7702aefc185264e4adfb9952040575/lpc.c#L68
1409 	 *
1410 	 *
1411 	 */
1412 
1413 	/* Map the reserved memory at the top of the 28-bit LPC firmware address space */
1414 	map.addr = 0x0FFFFFFF & -map.size;
1415 	astlpc_prinfo(
1416 		astlpc,
1417 		"Configuring FW2AHB to map reserved memory at 0x%08x for 0x%x in the LPC FW cycle address-space",
1418 		map.addr, map.size);
1419 
1420 	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
1421 	if (rc) {
1422 		astlpc_prwarn(astlpc,
1423 			      "Failed to map FW2AHB to reserved memory");
1424 		close(fd);
1425 		return -1;
1426 	}
1427 
1428 	/* Map the reserved memory into our address space */
1429 	lpc_map_base =
1430 		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1431 	if (lpc_map_base == MAP_FAILED) {
1432 		astlpc_prwarn(astlpc, "LPC mmap failed");
1433 		rc = -1;
1434 	} else {
1435 		astlpc->lpc_map =
1436 			(uint8_t *)lpc_map_base + map.size - LPC_WIN_SIZE;
1437 	}
1438 
1439 	close(fd);
1440 
1441 	return rc;
1442 }
1443 
1444 static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc,
1445 				       const char *kcs_path)
1446 {
1447 	astlpc->kcs_fd = open(kcs_path, O_RDWR);
1448 	if (astlpc->kcs_fd < 0)
1449 		return -1;
1450 
1451 	return 0;
1452 }
1453 
1454 static int __mctp_astlpc_fileio_kcs_read(void *arg,
1455 					 enum mctp_binding_astlpc_kcs_reg reg,
1456 					 uint8_t *val)
1457 {
1458 	struct mctp_binding_astlpc *astlpc = arg;
1459 	off_t offset = reg;
1460 	int rc;
1461 
1462 	rc = pread(astlpc->kcs_fd, val, 1, offset);
1463 
1464 	return rc == 1 ? 0 : -1;
1465 }
1466 
1467 static int __mctp_astlpc_fileio_kcs_write(void *arg,
1468 					  enum mctp_binding_astlpc_kcs_reg reg,
1469 					  uint8_t val)
1470 {
1471 	struct mctp_binding_astlpc *astlpc = arg;
1472 	off_t offset = reg;
1473 	int rc;
1474 
1475 	rc = pwrite(astlpc->kcs_fd, &val, 1, offset);
1476 
1477 	return rc == 1 ? 0 : -1;
1478 }
1479 
1480 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc,
1481 			    struct pollfd *pollfd)
1482 {
1483 	bool release;
1484 
1485 	pollfd->fd = astlpc->kcs_fd;
1486 	pollfd->events = 0;
1487 
1488 	release = astlpc->layout.rx.state == buffer_state_prepared ||
1489 		  astlpc->layout.tx.state == buffer_state_prepared;
1490 
1491 	pollfd->events = release ? POLLOUT : POLLIN;
1492 
1493 	return 0;
1494 }
1495 
1496 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(const char *kcs_path)
1497 {
1498 	struct mctp_binding_astlpc *astlpc;
1499 	int rc;
1500 
1501 	/*
1502 	 * If we're doing file IO then we're very likely not running
1503 	 * freestanding, so let's assume that we're on the BMC side.
1504 	 *
1505 	 * Requesting an MTU of 0 requests the largest possible MTU, whatever
1506 	 * value that might take.
1507 	 */
1508 	astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0);
1509 	if (!astlpc)
1510 		return NULL;
1511 
1512 	/* Set internal operations for KCS. We use direct access to the LPC
1513 	 * map area. */
1514 	astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
1515 	astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
1516 	astlpc->ops_data = astlpc;
1517 
1518 	rc = mctp_astlpc_init_fileio_lpc(astlpc);
1519 	if (rc) {
1520 		free(astlpc);
1521 		return NULL;
1522 	}
1523 
1524 	rc = mctp_astlpc_init_fileio_kcs(astlpc, kcs_path);
1525 	if (rc) {
1526 		free(astlpc);
1527 		return NULL;
1528 	}
1529 
1530 	return astlpc;
1531 }
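/*
 * A minimal sketch of how a BMC-side daemon might drive this binding with the
 * file I/O helpers above. The KCS device path, the EID of 8 and the
 * rx_handler callback are illustrative choices, not values required by the
 * binding:
 *
 *	struct mctp *mctp = mctp_init();
 *	struct mctp_binding_astlpc *astlpc =
 *		mctp_astlpc_init_fileio("/dev/mctp0");
 *	struct pollfd pollfd;
 *
 *	mctp_register_bus(mctp, mctp_binding_astlpc_core(astlpc), 8);
 *	mctp_set_rx_all(mctp, rx_handler, NULL);
 *
 *	for (;;) {
 *		mctp_astlpc_init_pollfd(astlpc, &pollfd);
 *		poll(&pollfd, 1, -1);
 *		if (pollfd.revents)
 *			mctp_astlpc_poll(astlpc);
 *	}
 */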
1532 #else
1533 struct mctp_binding_astlpc *
1534 mctp_astlpc_init_fileio(const char *kcs_path __unused)
1535 {
1536 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1537 	return NULL;
1538 }
1539 
1540 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc __unused,
1541 			    struct pollfd *pollfd __unused)
1542 {
1543 	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
1544 	return -1;
1545 }
1546 #endif
1547