1 /* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */
2
3 #if HAVE_CONFIG_H
4 #include "config.h"
5 #endif
6
7 #if HAVE_ENDIAN_H
8 #include <endian.h>
9 #endif
10
11 #include <assert.h>
12 #include <err.h>
13 #include <errno.h>
14 #include <inttypes.h>
15 #include <stdbool.h>
16 #include <stdlib.h>
17 #include <string.h>
18
19 #define pr_fmt(x) "astlpc: " x
20
21 #include "container_of.h"
22 #include "crc32.h"
23 #include "libmctp.h"
24 #include "libmctp-alloc.h"
25 #include "libmctp-log.h"
26 #include "libmctp-astlpc.h"
27 #include "range.h"
28
29 #ifdef MCTP_HAVE_FILEIO
30
31 #include <unistd.h>
32 #include <fcntl.h>
33 #include <poll.h>
34 #include <sys/ioctl.h>
35 #include <sys/mman.h>
36 #include <linux/aspeed-lpc-ctrl.h>
37
38 /* kernel interface */
39 static const char *lpc_path = "/dev/aspeed-lpc-ctrl";
40
41 #endif
42
/* Single-byte commands exchanged through the KCS data register. */
enum mctp_astlpc_cmd {
	cmd_initialise = 0x00,
	cmd_tx_begin = 0x01,
	cmd_rx_complete = 0x02,
	/* Written alongside status updates to force an ODR interrupt */
	cmd_dummy_value = 0xff,
};
49
/*
 * Ownership state machine for the Rx/Tx packet buffers that live in the
 * shared LPC window. Transitions are driven by the KCS control commands.
 */
enum mctp_astlpc_buffer_state {
	/*
	 * Prior to "Channel Ready" we mark the buffers as "idle" to catch illegal accesses. In this
	 * state neither side is considered the owner of the buffer.
	 *
	 * Upon "Channel Ready", each side transitions the buffers from the initial "idle" state
	 * to the following target states:
	 *
	 * Tx buffer: "acquired"
	 * Rx buffer: "released"
	 */
	buffer_state_idle,

	/*
	 * Beyond initialisation by "Channel Ready", buffers are in the "acquired" state once:
	 *
	 * 1. We dequeue a control command transferring the buffer to our ownership out of the KCS
	 *    interface, and
	 * 2. We are yet to complete all of our required accesses to the buffer
	 *
	 * * The Tx buffer enters the "acquired" state when we dequeue the "Rx Complete" command
	 * * The Rx buffer enters the "acquired" state when we dequeue the "Tx Begin" command
	 *
	 * It is a failure of implementation if it's possible for both sides to simultaneously
	 * consider a buffer as "acquired".
	 */
	buffer_state_acquired,

	/*
	 * Buffers are in the "prepared" state when:
	 *
	 * 1. We have completed all of our required accesses (read or write) for the buffer, and
	 * 2. We have not yet successfully enqueued the control command to hand off ownership
	 */
	buffer_state_prepared,

	/*
	 * Beyond initialisation by "Channel Ready", buffers are in the "released" state once:
	 *
	 * 1. We successfully enqueue the control command transferring ownership to the remote
	 *    side in to the KCS interface
	 *
	 * * The Tx buffer enters the "released" state when we enqueue the "Tx Begin" command
	 * * The Rx buffer enters the "released" state when we enqueue the "Rx Complete" command
	 *
	 * It may be the case that both sides simultaneously consider a buffer to be in the
	 * "released" state. However, if this is true, it must also be true that a buffer ownership
	 * transfer command has been enqueued in the KCS interface and is yet to be dequeued.
	 */
	buffer_state_released,
};
101
/* One packet buffer within the shared LPC window */
struct mctp_astlpc_buffer {
	uint32_t offset; /* Byte offset from the base of the LPC window */
	uint32_t size;	 /* Buffer length in bytes */
	enum mctp_astlpc_buffer_state state; /* Current ownership state */
};
107
/* Local (endpoint-relative) view of the negotiated buffer layout */
struct mctp_astlpc_layout {
	struct mctp_astlpc_buffer rx; /* Buffer we receive packets from */
	struct mctp_astlpc_buffer tx; /* Buffer we transmit packets into */
};
112
/* Per-version protocol operations, selected after version negotiation */
struct mctp_astlpc_protocol {
	uint16_t version;
	/* Medium-specific packet size for a given MCTP packet size */
	uint32_t (*packet_size)(uint32_t body);
	/* Inverse of packet_size() */
	uint32_t (*body_size)(uint32_t packet);
	/* Append any medium-specific trailer (v3: CRC32) */
	void (*pktbuf_protect)(struct mctp_pktbuf *pkt);
	/* Check and strip the medium-specific trailer */
	bool (*pktbuf_validate)(struct mctp_pktbuf *pkt);
};
120
/* Instance state for one astlpc binding */
struct mctp_binding_astlpc {
	struct mctp_binding binding;

	void *lpc_map; /* Direct mapping of the LPC window, NULL if indirect */
	struct mctp_astlpc_layout layout; /* Negotiated buffer layout */

	uint8_t mode; /* MCTP_BINDING_ASTLPC_MODE_BMC or _HOST */
	uint32_t requested_mtu; /* Consumer-requested MTU; 0 if unset */

	/* Ops for the negotiated protocol version; entry 0 while inactive */
	const struct mctp_astlpc_protocol *proto;

	/* direct ops data */
	struct mctp_binding_astlpc_ops ops;
	void *ops_data;

	/* fileio ops data */
	int kcs_fd;
	uint8_t kcs_status;
};
140
141 #define binding_to_astlpc(b) \
142 container_of(b, struct mctp_binding_astlpc, binding)
143
144 #define astlpc_prlog(ctx, lvl, fmt, ...) \
145 do { \
146 bool __bmc = ((ctx)->mode == MCTP_BINDING_ASTLPC_MODE_BMC); \
147 mctp_prlog(lvl, pr_fmt("%s: " fmt), __bmc ? "bmc" : "host", \
148 ##__VA_ARGS__); \
149 } while (0)
150
151 #define astlpc_prerr(ctx, fmt, ...) \
152 astlpc_prlog(ctx, MCTP_LOG_ERR, fmt, ##__VA_ARGS__)
153 #define astlpc_prwarn(ctx, fmt, ...) \
154 astlpc_prlog(ctx, MCTP_LOG_WARNING, fmt, ##__VA_ARGS__)
155 #define astlpc_prnotice(ctx, fmt, ...) \
156 astlpc_prlog(ctx, MCTP_LOG_NOTICE, fmt, ##__VA_ARGS__)
157 #define astlpc_prinfo(ctx, fmt, ...) \
158 astlpc_prlog(ctx, MCTP_LOG_INFO, fmt, ##__VA_ARGS__)
159 #define astlpc_prdebug(ctx, fmt, ...) \
160 astlpc_prlog(ctx, MCTP_LOG_DEBUG, fmt, ##__VA_ARGS__)
161
162 /* clang-format off */
163 #define ASTLPC_MCTP_MAGIC 0x4d435450
164 #define ASTLPC_VER_BAD 0
165 #define ASTLPC_VER_MIN 1
166
167 /* Support testing of new binding protocols */
168 #ifndef ASTLPC_VER_CUR
169 #define ASTLPC_VER_CUR 3
170 #endif
171 /* clang-format on */
172
173 #ifndef ARRAY_SIZE
174 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
175 #endif
176
/* v1 frames carry a 4-byte length header ahead of the MCTP packet. */
static uint32_t astlpc_packet_size_v1(uint32_t body)
{
	const uint32_t total = body + 4;

	/* Guard against uint32_t wrap-around */
	assert(total > body);

	return total;
}
183
/* Strip the v1 4-byte length header from a frame size. */
static uint32_t astlpc_body_size_v1(uint32_t packet)
{
	const uint32_t body = packet - 4;

	/* Guard against uint32_t underflow */
	assert(body < packet);

	return body;
}
190
/*
 * v1/v2 frames carry no medium-specific trailer, so protecting the
 * packet is a no-op.
 *
 * Declared static to match the other protocol ops in this file; it is
 * only referenced through astlpc_protocol_version.
 */
static void astlpc_pktbuf_protect_v1(struct mctp_pktbuf *pkt)
{
	(void)pkt;
}
195
/*
 * v1/v2 frames have no integrity trailer to check, so every packet is
 * considered valid.
 *
 * Declared static to match the other protocol ops in this file; it is
 * only referenced through astlpc_protocol_version.
 */
static bool astlpc_pktbuf_validate_v1(struct mctp_pktbuf *pkt)
{
	(void)pkt;
	return true;
}
201
/* v3 frames have a 4-byte length header and a 4-byte CRC32 trailer. */
static uint32_t astlpc_packet_size_v3(uint32_t body)
{
	const uint32_t total = body + 4 + 4;

	/* Guard against uint32_t wrap-around */
	assert(total > body);

	return total;
}
208
/* Strip the v3 length header and CRC32 trailer from a frame size. */
static uint32_t astlpc_body_size_v3(uint32_t packet)
{
	const uint32_t body = packet - 4 - 4;

	/* Guard against uint32_t underflow */
	assert(body < packet);

	return body;
}
215
/*
 * Append the v3 integrity trailer: a big-endian CRC32 computed over the
 * whole MCTP packet.
 *
 * Declared static to match the other protocol ops in this file; it is
 * only referenced through astlpc_protocol_version.
 */
static void astlpc_pktbuf_protect_v3(struct mctp_pktbuf *pkt)
{
	uint32_t code;

	code = htobe32(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt)));
	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
	mctp_pktbuf_push(pkt, &code, 4);
}
224
/*
 * Validate and strip the v3 CRC32 trailer.
 *
 * Recomputes the CRC over the packet excluding the trailing four bytes,
 * pops the trailer off the pktbuf, and compares the two byte-for-byte.
 * Returns true iff the trailer was present and matched.
 *
 * Declared static to match the other protocol ops in this file; it is
 * only referenced through astlpc_protocol_version.
 */
static bool astlpc_pktbuf_validate_v3(struct mctp_pktbuf *pkt)
{
	uint32_t code;
	void *check;

	code = be32toh(crc32(mctp_pktbuf_hdr(pkt), mctp_pktbuf_size(pkt) - 4));
	mctp_prdebug("%s: 0x%" PRIx32, __func__, code);
	check = mctp_pktbuf_pop(pkt, 4);
	return check && !memcmp(&code, check, 4);
}
235
/*
 * Protocol ops table, indexed by negotiated version. Entry 0 is the
 * "invalid / not yet negotiated" protocol with NULL ops; v2 shares the
 * v1 frame format and differs only in layout negotiation.
 */
static const struct mctp_astlpc_protocol astlpc_protocol_version[] = {
	[0] = {
		.version = 0,
		.packet_size = NULL,
		.body_size = NULL,
		.pktbuf_protect = NULL,
		.pktbuf_validate = NULL,
	},
	[1] = {
		.version = 1,
		.packet_size = astlpc_packet_size_v1,
		.body_size = astlpc_body_size_v1,
		.pktbuf_protect = astlpc_pktbuf_protect_v1,
		.pktbuf_validate = astlpc_pktbuf_validate_v1,
	},
	[2] = {
		.version = 2,
		.packet_size = astlpc_packet_size_v1,
		.body_size = astlpc_body_size_v1,
		.pktbuf_protect = astlpc_pktbuf_protect_v1,
		.pktbuf_validate = astlpc_pktbuf_validate_v1,
	},
	[3] = {
		.version = 3,
		.packet_size = astlpc_packet_size_v3,
		.body_size = astlpc_body_size_v3,
		.pktbuf_protect = astlpc_pktbuf_protect_v3,
		.pktbuf_validate = astlpc_pktbuf_validate_v3,
	},
};
266
/*
 * Control region layout at the base of the shared LPC window. All
 * multi-byte fields are big-endian on the wire. Buffer names in the
 * layout are defined from the host's perspective.
 */
struct mctp_lpcmap_hdr {
	uint32_t magic; /* ASTLPC_MCTP_MAGIC */

	uint16_t bmc_ver_min;
	uint16_t bmc_ver_cur;
	uint16_t host_ver_min;
	uint16_t host_ver_cur;
	uint16_t negotiated_ver;
	uint16_t pad0;

	/* Host-relative buffer layout */
	struct {
		uint32_t rx_offset;
		uint32_t rx_size;
		uint32_t tx_offset;
		uint32_t tx_size;
	} layout;
} __attribute__((packed));
284
/* Size reserved for the control region at the base of the LPC window */
static const uint32_t control_size = 0x100;

/* Total size of the shared LPC window (control region + packet buffers) */
#define LPC_WIN_SIZE (1 * 1024 * 1024)

/* KCS status register bits */
#define KCS_STATUS_BMC_READY 0x80
#define KCS_STATUS_CHANNEL_ACTIVE 0x40
#define KCS_STATUS_IBF 0x02
#define KCS_STATUS_OBF 0x01
293
mctp_astlpc_kcs_write(struct mctp_binding_astlpc * astlpc,enum mctp_binding_astlpc_kcs_reg reg,uint8_t val)294 static inline int mctp_astlpc_kcs_write(struct mctp_binding_astlpc *astlpc,
295 enum mctp_binding_astlpc_kcs_reg reg,
296 uint8_t val)
297 {
298 return astlpc->ops.kcs_write(astlpc->ops_data, reg, val);
299 }
300
mctp_astlpc_kcs_read(struct mctp_binding_astlpc * astlpc,enum mctp_binding_astlpc_kcs_reg reg,uint8_t * val)301 static inline int mctp_astlpc_kcs_read(struct mctp_binding_astlpc *astlpc,
302 enum mctp_binding_astlpc_kcs_reg reg,
303 uint8_t *val)
304 {
305 return astlpc->ops.kcs_read(astlpc->ops_data, reg, val);
306 }
307
mctp_astlpc_lpc_write(struct mctp_binding_astlpc * astlpc,const void * buf,long offset,size_t len)308 static inline int mctp_astlpc_lpc_write(struct mctp_binding_astlpc *astlpc,
309 const void *buf, long offset,
310 size_t len)
311 {
312 astlpc_prdebug(astlpc, "%s: %zu bytes to 0x%lx", __func__, len, offset);
313
314 assert(offset >= 0);
315
316 /* Indirect access */
317 if (astlpc->ops.lpc_write) {
318 void *data = astlpc->ops_data;
319
320 return astlpc->ops.lpc_write(data, buf, offset, len);
321 }
322
323 /* Direct mapping */
324 assert(astlpc->lpc_map);
325 memcpy(&((char *)astlpc->lpc_map)[offset], buf, len);
326
327 return 0;
328 }
329
mctp_astlpc_lpc_read(struct mctp_binding_astlpc * astlpc,void * buf,long offset,size_t len)330 static inline int mctp_astlpc_lpc_read(struct mctp_binding_astlpc *astlpc,
331 void *buf, long offset, size_t len)
332 {
333 astlpc_prdebug(astlpc, "%s: %zu bytes from 0x%lx", __func__, len,
334 offset);
335
336 assert(offset >= 0);
337
338 /* Indirect access */
339 if (astlpc->ops.lpc_read) {
340 void *data = astlpc->ops_data;
341
342 return astlpc->ops.lpc_read(data, buf, offset, len);
343 }
344
345 /* Direct mapping */
346 assert(astlpc->lpc_map);
347 memcpy(buf, &((char *)astlpc->lpc_map)[offset], len);
348
349 return 0;
350 }
351
352 static void
mctp_astlpc_kcs_print_status_write(struct mctp_binding_astlpc * astlpc,uint8_t status)353 mctp_astlpc_kcs_print_status_write(struct mctp_binding_astlpc *astlpc,
354 uint8_t status)
355 {
356 astlpc_prnotice(
357 astlpc, "Binding state is 0x%hhx: BMC %s, Channel %s, OBF %s",
358 status, status & KCS_STATUS_BMC_READY ? "active" : "inactive",
359 status & KCS_STATUS_CHANNEL_ACTIVE ? "active" : "inactive",
360 status & KCS_STATUS_OBF ? "preserved" : "cleared");
361 }
362
/*
 * Publish @status to the KCS status register, then write a dummy byte
 * to the data register to guarantee the remote side is interrupted.
 *
 * Returns 0 on success, -1 if either KCS register write fails.
 */
static int mctp_astlpc_kcs_set_status(struct mctp_binding_astlpc *astlpc,
				      uint8_t status)
{
	uint8_t data;
	int rc;

	/* Since we're setting the status register, we want the other endpoint
	 * to be interrupted. However, some hardware may only raise a host-side
	 * interrupt on an ODR event.
	 * So, write a dummy value of 0xff to ODR, which will ensure that an
	 * interrupt is triggered, and can be ignored by the host.
	 */
	data = cmd_dummy_value;

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS status write failed");
		return -1;
	}

	mctp_astlpc_kcs_print_status_write(astlpc, status);

	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS dummy data write failed");
		return -1;
	}

	return 0;
}
393
mctp_astlpc_layout_read(struct mctp_binding_astlpc * astlpc,struct mctp_astlpc_layout * layout)394 static int mctp_astlpc_layout_read(struct mctp_binding_astlpc *astlpc,
395 struct mctp_astlpc_layout *layout)
396 {
397 struct mctp_lpcmap_hdr hdr;
398 int rc;
399
400 rc = mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));
401 if (rc < 0)
402 return rc;
403
404 /* Flip the buffers as the names are defined in terms of the host */
405 if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
406 layout->rx.offset = be32toh(hdr.layout.tx_offset);
407 layout->rx.size = be32toh(hdr.layout.tx_size);
408 layout->tx.offset = be32toh(hdr.layout.rx_offset);
409 layout->tx.size = be32toh(hdr.layout.rx_size);
410 } else {
411 assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);
412
413 layout->rx.offset = be32toh(hdr.layout.rx_offset);
414 layout->rx.size = be32toh(hdr.layout.rx_size);
415 layout->tx.offset = be32toh(hdr.layout.tx_offset);
416 layout->tx.size = be32toh(hdr.layout.tx_size);
417 }
418
419 return 0;
420 }
421
/*
 * Write the local buffer layout into the shared control region.
 *
 * The BMC owns the layout and writes all four fields (flipped, as the
 * header's names are host-relative). From v2 onward the host may only
 * write its own rx_size; offsets and the host's tx_size remain under
 * BMC control.
 *
 * Returns the result of the underlying LPC write.
 */
static int mctp_astlpc_layout_write(struct mctp_binding_astlpc *astlpc,
				    struct mctp_astlpc_layout *layout)
{
	uint32_t rx_size_be;

	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC) {
		struct mctp_lpcmap_hdr hdr;

		/*
		 * Flip the buffers as the names are defined in terms of the
		 * host
		 */
		hdr.layout.rx_offset = htobe32(layout->tx.offset);
		hdr.layout.rx_size = htobe32(layout->tx.size);
		hdr.layout.tx_offset = htobe32(layout->rx.offset);
		hdr.layout.tx_size = htobe32(layout->rx.size);

		/* Only the layout member is written to shared memory */
		return mctp_astlpc_lpc_write(astlpc, &hdr.layout,
					     offsetof(struct mctp_lpcmap_hdr,
						      layout),
					     sizeof(hdr.layout));
	}

	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

	/*
	 * As of v2 we only need to write rx_size - the offsets are controlled
	 * by the BMC, as is the BMC's rx_size (host tx_size).
	 */
	rx_size_be = htobe32(layout->rx.size);
	return mctp_astlpc_lpc_write(astlpc, &rx_size_be,
				     offsetof(struct mctp_lpcmap_hdr,
					      layout.rx_size),
				     sizeof(rx_size_be));
}
457
458 static bool
mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc * astlpc,const struct mctp_astlpc_buffer * buf,const char * name)459 mctp_astlpc_buffer_validate(const struct mctp_binding_astlpc *astlpc,
460 const struct mctp_astlpc_buffer *buf,
461 const char *name)
462 {
463 /* Check for overflow */
464 if (buf->offset + buf->size < buf->offset) {
465 mctp_prerr(
466 "%s packet buffer parameters overflow: offset: 0x%" PRIx32
467 ", size: %" PRIu32,
468 name, buf->offset, buf->size);
469 return false;
470 }
471
472 /* Check that the buffers are contained within the allocated space */
473 if (buf->offset + buf->size > LPC_WIN_SIZE) {
474 mctp_prerr(
475 "%s packet buffer parameters exceed %uM window size: offset: 0x%" PRIx32
476 ", size: %" PRIu32,
477 name, (LPC_WIN_SIZE / (1024 * 1024)), buf->offset,
478 buf->size);
479 return false;
480 }
481
482 /* Check that the baseline transmission unit is supported */
483 if (buf->size <
484 astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU))) {
485 mctp_prerr(
486 "%s packet buffer too small: Require %" PRIu32
487 " bytes to support the %u byte baseline transmission unit, found %" PRIu32,
488 name,
489 astlpc->proto->packet_size(MCTP_PACKET_SIZE(MCTP_BTU)),
490 MCTP_BTU, buf->size);
491 return false;
492 }
493
494 /* Check for overlap with the control space */
495 if (buf->offset < control_size) {
496 mctp_prerr(
497 "%s packet buffer overlaps control region {0x%" PRIx32
498 ", %" PRIu32 "}: Rx {0x%" PRIx32 ", %" PRIu32 "}",
499 name, 0U, control_size, buf->offset, buf->size);
500 return false;
501 }
502
503 return true;
504 }
505
506 static bool
mctp_astlpc_layout_validate(const struct mctp_binding_astlpc * astlpc,const struct mctp_astlpc_layout * layout)507 mctp_astlpc_layout_validate(const struct mctp_binding_astlpc *astlpc,
508 const struct mctp_astlpc_layout *layout)
509 {
510 const struct mctp_astlpc_buffer *rx = &layout->rx;
511 const struct mctp_astlpc_buffer *tx = &layout->tx;
512 bool rx_valid, tx_valid;
513
514 rx_valid = mctp_astlpc_buffer_validate(astlpc, rx, "Rx");
515 tx_valid = mctp_astlpc_buffer_validate(astlpc, tx, "Tx");
516
517 if (!(rx_valid && tx_valid))
518 return false;
519
520 /* Check that the buffers are disjoint */
521 if ((rx->offset <= tx->offset && rx->offset + rx->size > tx->offset) ||
522 (tx->offset <= rx->offset && tx->offset + tx->size > rx->offset)) {
523 mctp_prerr("Rx and Tx packet buffers overlap: Rx {0x%" PRIx32
524 ", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
525 rx->offset, rx->size, tx->offset, tx->size);
526 return false;
527 }
528
529 return true;
530 }
531
/*
 * BMC-side channel bring-up: choose an initial symmetric buffer layout,
 * publish the control region header (magic, supported version range and
 * host-relative layout), and advertise "BMC Ready" over KCS.
 *
 * Returns 0 on success, -EINVAL if the computed layout fails
 * validation, or -1 if the KCS status update fails.
 */
static int mctp_astlpc_init_bmc(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_lpcmap_hdr hdr = { 0 };
	uint8_t status;
	uint32_t sz;

	/*
	 * The largest buffer size is half of the allocated MCTP space
	 * excluding the control space.
	 */
	sz = ((LPC_WIN_SIZE - control_size) / 2);

	/*
	 * Trim the MTU to a multiple of 16 to meet the requirements of 12.17
	 * Query Hop in DSP0236 v1.3.0.
	 */
	sz = MCTP_BODY_SIZE(astlpc->proto->body_size(sz));
	sz &= ~0xfUL;
	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(sz));

	/* Shrink further if the consumer requested a smaller MTU */
	if (astlpc->requested_mtu) {
		uint32_t rpkt, rmtu;

		rmtu = astlpc->requested_mtu;
		rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
		sz = MIN(sz, rpkt);
	}

	/* Flip the buffers as the names are defined in terms of the host */
	astlpc->layout.tx.offset = control_size;
	astlpc->layout.tx.size = sz;
	astlpc->layout.rx.offset =
		astlpc->layout.tx.offset + astlpc->layout.tx.size;
	astlpc->layout.rx.size = sz;

	if (!mctp_astlpc_layout_validate(astlpc, &astlpc->layout)) {
		astlpc_prerr(astlpc, "Cannot support an MTU of %" PRIu32, sz);
		return -EINVAL;
	}

	hdr = (struct mctp_lpcmap_hdr){
		.magic = htobe32(ASTLPC_MCTP_MAGIC),
		.bmc_ver_min = htobe16(ASTLPC_VER_MIN),
		.bmc_ver_cur = htobe16(ASTLPC_VER_CUR),

		/* Flip the buffers back as we're now describing the host's
		 * configuration to the host */
		.layout.rx_offset = htobe32(astlpc->layout.tx.offset),
		.layout.rx_size = htobe32(astlpc->layout.tx.size),
		.layout.tx_offset = htobe32(astlpc->layout.rx.offset),
		.layout.tx_size = htobe32(astlpc->layout.rx.size),
	};

	mctp_astlpc_lpc_write(astlpc, &hdr, 0, sizeof(hdr));

	/*
	 * Set status indicating that the BMC is now active. Be explicit about
	 * clearing OBF; we're reinitialising the binding and so any previous
	 * buffer state is irrelevant.
	 */
	status = KCS_STATUS_BMC_READY & ~KCS_STATUS_OBF;
	return mctp_astlpc_kcs_set_status(astlpc, status);
}
595
mctp_binding_astlpc_start_bmc(struct mctp_binding * b)596 static int mctp_binding_astlpc_start_bmc(struct mctp_binding *b)
597 {
598 struct mctp_binding_astlpc *astlpc =
599 container_of(b, struct mctp_binding_astlpc, binding);
600
601 astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_CUR];
602
603 return mctp_astlpc_init_bmc(astlpc);
604 }
605
/*
 * Check that both advertised version ranges are well-formed (non-zero,
 * min <= cur) and that the two ranges intersect, logging the specific
 * failure otherwise.
 *
 * Returns true if version negotiation can succeed.
 */
static bool mctp_astlpc_validate_version(uint16_t bmc_ver_min,
					 uint16_t bmc_ver_cur,
					 uint16_t host_ver_min,
					 uint16_t host_ver_cur)
{
	if (!(bmc_ver_min && bmc_ver_cur && host_ver_min && host_ver_cur)) {
		mctp_prerr("Invalid version present in [%" PRIu16 ", %" PRIu16
			   "], [%" PRIu16 ", %" PRIu16 "]",
			   bmc_ver_min, bmc_ver_cur, host_ver_min,
			   host_ver_cur);
		return false;
	} else if (bmc_ver_min > bmc_ver_cur) {
		mctp_prerr("Invalid bmc version range [%" PRIu16 ", %" PRIu16
			   "]",
			   bmc_ver_min, bmc_ver_cur);
		return false;
	} else if (host_ver_min > host_ver_cur) {
		mctp_prerr("Invalid host version range [%" PRIu16 ", %" PRIu16
			   "]",
			   host_ver_min, host_ver_cur);
		return false;
	} else if ((host_ver_cur < bmc_ver_min) ||
		   (host_ver_min > bmc_ver_cur)) {
		mctp_prerr(
			"Unable to satisfy version negotiation with ranges [%" PRIu16
			", %" PRIu16 "] and [%" PRIu16 ", %" PRIu16 "]",
			bmc_ver_min, bmc_ver_cur, host_ver_min, host_ver_cur);
		return false;
	}

	return true;
}
638
/*
 * Host-side (v2+) layout negotiation: read and validate the layout the
 * BMC proposed, substitute our requested Rx size, re-validate the
 * result, and write it back for the BMC to accept or clamp.
 *
 * Returns 0 on success, a negative errno on read/validation failure.
 */
static int mctp_astlpc_negotiate_layout_host(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout layout;
	uint32_t rmtu;
	uint32_t sz;
	int rc;

	rc = mctp_astlpc_layout_read(astlpc, &layout);
	if (rc < 0)
		return rc;

	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
		astlpc_prerr(
			astlpc,
			"BMC provided invalid buffer layout: Rx {0x%" PRIx32
			", %" PRIu32 "}, Tx {0x%" PRIx32 ", %" PRIu32 "}",
			layout.rx.offset, layout.rx.size, layout.tx.offset,
			layout.tx.size);
		return -EINVAL;
	}

	astlpc_prinfo(astlpc, "Desire an MTU of %" PRIu32 " bytes",
		      astlpc->requested_mtu);

	/* Propose an Rx buffer sized for the requested MTU */
	rmtu = astlpc->requested_mtu;
	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
	layout.rx.size = sz;

	if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
		astlpc_prerr(
			astlpc,
			"Generated invalid buffer layout with size %" PRIu32
			": Rx {0x%" PRIx32 ", %" PRIu32 "}, Tx {0x%" PRIx32
			", %" PRIu32 "}",
			sz, layout.rx.offset, layout.rx.size, layout.tx.offset,
			layout.tx.size);
		return -EINVAL;
	}

	astlpc_prinfo(astlpc, "Requesting MTU of %" PRIu32 " bytes",
		      astlpc->requested_mtu);

	return mctp_astlpc_layout_write(astlpc, &layout);
}
683
mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,uint16_t bmc_ver_cur,uint16_t host_ver_min,uint16_t host_ver_cur)684 static uint16_t mctp_astlpc_negotiate_version(uint16_t bmc_ver_min,
685 uint16_t bmc_ver_cur,
686 uint16_t host_ver_min,
687 uint16_t host_ver_cur)
688 {
689 if (!mctp_astlpc_validate_version(bmc_ver_min, bmc_ver_cur,
690 host_ver_min, host_ver_cur))
691 return ASTLPC_VER_BAD;
692
693 if (bmc_ver_cur < host_ver_cur)
694 return bmc_ver_cur;
695
696 return host_ver_cur;
697 }
698
/*
 * Host-side channel bring-up: confirm the BMC is ready, read its
 * advertised version range, pre-compute the expected negotiated version
 * (so buffer sizes can be derived), optionally perform v2+ layout
 * negotiation, advertise our version range, and send "Initialise" over
 * KCS. The handshake completes asynchronously when the BMC raises
 * "Channel Active".
 *
 * Returns 0 on success, -EHOSTDOWN if the BMC is not ready, or a
 * negative errno / KCS error code on failure.
 */
static int mctp_astlpc_init_host(struct mctp_binding_astlpc *astlpc)
{
	const uint16_t ver_min_be = htobe16(ASTLPC_VER_MIN);
	const uint16_t ver_cur_be = htobe16(ASTLPC_VER_CUR);
	uint16_t bmc_ver_min, bmc_ver_cur, negotiated;
	struct mctp_lpcmap_hdr hdr;
	uint8_t status;
	int rc;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
	if (rc) {
		mctp_prwarn("KCS status read failed");
		return rc;
	}

	astlpc->kcs_status = status;

	if (!(status & KCS_STATUS_BMC_READY))
		return -EHOSTDOWN;

	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));

	bmc_ver_min = be16toh(hdr.bmc_ver_min);
	bmc_ver_cur = be16toh(hdr.bmc_ver_cur);

	/* Calculate the expected value of negotiated_ver */
	negotiated = mctp_astlpc_negotiate_version(
		bmc_ver_min, bmc_ver_cur, ASTLPC_VER_MIN, ASTLPC_VER_CUR);
	if (!negotiated) {
		astlpc_prerr(astlpc, "Cannot negotiate with invalid versions");
		return -EINVAL;
	}

	/* Assign protocol ops so we can calculate the packet buffer sizes */
	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
	astlpc->proto = &astlpc_protocol_version[negotiated];

	/* Negotiate packet buffers in v2 style if the BMC supports it */
	if (negotiated >= 2) {
		rc = mctp_astlpc_negotiate_layout_host(astlpc);
		if (rc < 0)
			return rc;
	}

	/* Advertise the host's supported protocol versions */
	mctp_astlpc_lpc_write(astlpc, &ver_min_be,
			      offsetof(struct mctp_lpcmap_hdr, host_ver_min),
			      sizeof(ver_min_be));

	mctp_astlpc_lpc_write(astlpc, &ver_cur_be,
			      offsetof(struct mctp_lpcmap_hdr, host_ver_cur),
			      sizeof(ver_cur_be));

	/* Send channel init command */
	rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, 0x0);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS write failed");
	}

	/*
	 * Configure the host so `astlpc->proto->version == 0` holds until we
	 * receive a subsequent status update from the BMC. Until then,
	 * `astlpc->proto->version == 0` indicates that we're yet to complete
	 * the channel initialisation handshake.
	 *
	 * When the BMC provides a status update with KCS_STATUS_CHANNEL_ACTIVE
	 * set we will assign the appropriate protocol ops struct in accordance
	 * with `negotiated_ver`.
	 */
	astlpc->proto = &astlpc_protocol_version[ASTLPC_VER_BAD];

	return rc;
}
772
/* mctp_binding start hook for host mode. */
static int mctp_binding_astlpc_start_host(struct mctp_binding *b)
{
	return mctp_astlpc_init_host(binding_to_astlpc(b));
}
780
__mctp_astlpc_kcs_ready(struct mctp_binding_astlpc * astlpc,uint8_t status,bool is_write)781 static bool __mctp_astlpc_kcs_ready(struct mctp_binding_astlpc *astlpc,
782 uint8_t status, bool is_write)
783 {
784 bool is_bmc;
785 bool ready_state;
786 uint8_t flag;
787
788 is_bmc = (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC);
789 flag = (is_bmc ^ is_write) ? KCS_STATUS_IBF : KCS_STATUS_OBF;
790 ready_state = is_write ? 0 : 1;
791
792 return !!(status & flag) == ready_state;
793 }
794
795 static inline bool
mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc * astlpc,uint8_t status)796 mctp_astlpc_kcs_read_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
797 {
798 return __mctp_astlpc_kcs_ready(astlpc, status, false);
799 }
800
801 static inline bool
mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc * astlpc,uint8_t status)802 mctp_astlpc_kcs_write_ready(struct mctp_binding_astlpc *astlpc, uint8_t status)
803 {
804 return __mctp_astlpc_kcs_ready(astlpc, status, true);
805 }
806
mctp_astlpc_kcs_send(struct mctp_binding_astlpc * astlpc,enum mctp_astlpc_cmd data)807 static int mctp_astlpc_kcs_send(struct mctp_binding_astlpc *astlpc,
808 enum mctp_astlpc_cmd data)
809 {
810 uint8_t status;
811 int rc;
812
813 rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
814 if (rc) {
815 astlpc_prwarn(astlpc, "KCS status read failed");
816 return -EIO;
817 }
818 if (!mctp_astlpc_kcs_write_ready(astlpc, status))
819 return -EBUSY;
820
821 rc = mctp_astlpc_kcs_write(astlpc, MCTP_ASTLPC_KCS_REG_DATA, data);
822 if (rc) {
823 astlpc_prwarn(astlpc, "KCS data write failed");
824 return -EIO;
825 }
826
827 return 0;
828 }
829
/*
 * Core binding Tx hook: frame @pkt into the Tx buffer (4-byte length
 * header, packet body, any protocol trailer) and hand the buffer to the
 * remote side with "Tx Begin". Tx is disabled until the corresponding
 * "Rx Complete" returns buffer ownership to us.
 *
 * Returns 0 on success or if the KCS interface was busy (the core will
 * retry), -EMSGSIZE if the packet exceeds the negotiated buffer, or a
 * negative errno from the KCS send.
 */
static int mctp_binding_astlpc_tx(struct mctp_binding *b,
				  struct mctp_pktbuf *pkt)
{
	struct mctp_binding_astlpc *astlpc = binding_to_astlpc(b);
	uint32_t len, len_be;
	struct mctp_hdr *hdr;
	int rc;

	hdr = mctp_pktbuf_hdr(pkt);
	len = mctp_pktbuf_size(pkt);

	astlpc_prdebug(astlpc,
		       "%s: Transmitting %" PRIu32
		       "-byte packet (%hhu, %hhu, 0x%hhx)",
		       __func__, len, hdr->src, hdr->dest, hdr->flags_seq_tag);

	if (len > astlpc->proto->body_size(astlpc->layout.tx.size)) {
		astlpc_prwarn(astlpc, "invalid TX len %" PRIu32 ": %" PRIu32,
			      len,
			      astlpc->proto->body_size(astlpc->layout.tx.size));
		return -EMSGSIZE;
	}

	/* Hold off further Tx until the remote releases the buffer */
	mctp_binding_set_tx_enabled(b, false);

	/* Medium-specific header: big-endian body length */
	len_be = htobe32(len);
	mctp_astlpc_lpc_write(astlpc, &len_be, astlpc->layout.tx.offset,
			      sizeof(len_be));

	/* Append any protocol trailer (v3: CRC32) before copying out */
	astlpc->proto->pktbuf_protect(pkt);
	len = mctp_pktbuf_size(pkt);

	mctp_astlpc_lpc_write(astlpc, hdr, astlpc->layout.tx.offset + 4, len);

	astlpc->layout.tx.state = buffer_state_prepared;

	rc = mctp_astlpc_kcs_send(astlpc, cmd_tx_begin);
	if (!rc)
		astlpc->layout.tx.state = buffer_state_released;

	/* -EBUSY is not fatal: the buffer stays "prepared" for a retry */
	return rc == -EBUSY ? 0 : rc;
}
872
mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc * astlpc,struct mctp_astlpc_layout * layout)873 static uint32_t mctp_astlpc_calculate_mtu(struct mctp_binding_astlpc *astlpc,
874 struct mctp_astlpc_layout *layout)
875 {
876 uint32_t low, high, limit, rpkt;
877
878 /* Derive the largest MTU the BMC _can_ support */
879 low = MIN(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
880 high = MAX(astlpc->layout.rx.offset, astlpc->layout.tx.offset);
881 limit = high - low;
882
883 /* Determine the largest MTU the BMC _wants_ to support */
884 if (astlpc->requested_mtu) {
885 uint32_t rmtu = astlpc->requested_mtu;
886
887 rpkt = astlpc->proto->packet_size(MCTP_PACKET_SIZE(rmtu));
888 limit = MIN(limit, rpkt);
889 }
890
891 /* Determine the accepted MTU, applied both directions by convention */
892 rpkt = MIN(limit, layout->tx.size);
893 return MCTP_BODY_SIZE(astlpc->proto->body_size(rpkt));
894 }
895
/*
 * BMC-side layout negotiation: read the host's proposed layout,
 * compute a mutually acceptable MTU, apply it symmetrically to both
 * buffers, and write the enforced layout back to the control region.
 * From v2 onward the negotiated MTU also updates the bus packet size.
 *
 * Returns 0 on success, -EINVAL on an invalid protocol version or
 * layout, or a negative errno from the LPC accesses.
 */
static int mctp_astlpc_negotiate_layout_bmc(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_astlpc_layout proposed, pending;
	uint32_t sz, mtu;
	int rc;

	/* Do we have a valid protocol version? */
	if (!astlpc->proto->version)
		return -EINVAL;

	/* Extract the host's proposed layout */
	rc = mctp_astlpc_layout_read(astlpc, &proposed);
	if (rc < 0)
		return rc;

	/* Do we have a reasonable layout? */
	if (!mctp_astlpc_layout_validate(astlpc, &proposed))
		return -EINVAL;

	/* Negotiate the MTU */
	mtu = mctp_astlpc_calculate_mtu(astlpc, &proposed);
	sz = astlpc->proto->packet_size(MCTP_PACKET_SIZE(mtu));

	/*
	 * Use symmetric MTUs by convention and to pass constraints in rx/tx
	 * functions
	 */
	pending = astlpc->layout;
	pending.tx.size = sz;
	pending.rx.size = sz;

	if (mctp_astlpc_layout_validate(astlpc, &pending)) {
		/* We found a sensible Rx MTU, so honour it */
		astlpc->layout = pending;

		/* Enforce the negotiated MTU */
		rc = mctp_astlpc_layout_write(astlpc, &astlpc->layout);
		if (rc < 0)
			return rc;

		astlpc_prinfo(astlpc, "Negotiated an MTU of %" PRIu32 " bytes",
			      mtu);
	} else {
		astlpc_prwarn(astlpc, "MTU negotiation failed");
		return -EINVAL;
	}

	if (astlpc->proto->version >= 2)
		astlpc->binding.pkt_size = MCTP_PACKET_SIZE(mtu);

	return 0;
}
948
/*
 * BMC-side handling of the host's "Initialise" command: negotiate the
 * protocol version against the host's advertised range, negotiate the
 * buffer layout, publish negotiated_ver, initialise buffer ownership,
 * and raise "Channel Active" (or report failure) over KCS. Tx is
 * enabled iff the channel came up.
 */
static void mctp_astlpc_init_channel(struct mctp_binding_astlpc *astlpc)
{
	uint16_t negotiated, negotiated_be;
	struct mctp_lpcmap_hdr hdr;
	uint8_t status;
	int rc;

	mctp_astlpc_lpc_read(astlpc, &hdr, 0, sizeof(hdr));

	/* Version negotiation */
	negotiated = mctp_astlpc_negotiate_version(ASTLPC_VER_MIN,
						   ASTLPC_VER_CUR,
						   be16toh(hdr.host_ver_min),
						   be16toh(hdr.host_ver_cur));

	/* MTU negotiation requires knowing which protocol we'll use */
	assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
	astlpc->proto = &astlpc_protocol_version[negotiated];

	/* Host Rx MTU negotiation: Failure terminates channel init */
	rc = mctp_astlpc_negotiate_layout_bmc(astlpc);
	if (rc < 0)
		negotiated = ASTLPC_VER_BAD;

	/* Populate the negotiated version */
	negotiated_be = htobe16(negotiated);
	mctp_astlpc_lpc_write(astlpc, &negotiated_be,
			      offsetof(struct mctp_lpcmap_hdr, negotiated_ver),
			      sizeof(negotiated_be));

	/* Track buffer ownership */
	astlpc->layout.tx.state = buffer_state_acquired;
	astlpc->layout.rx.state = buffer_state_released;

	/* Finalise the configuration */
	status = KCS_STATUS_BMC_READY | KCS_STATUS_OBF;
	if (negotiated > 0) {
		astlpc_prinfo(astlpc, "Negotiated binding version %" PRIu16,
			      negotiated);
		status |= KCS_STATUS_CHANNEL_ACTIVE;
	} else {
		astlpc_prerr(astlpc, "Failed to initialise channel");
	}

	mctp_astlpc_kcs_set_status(astlpc, status);

	mctp_binding_set_tx_enabled(&astlpc->binding,
				    status & KCS_STATUS_CHANNEL_ACTIVE);
}
998
/*
 * Handle "Tx Begin" from the remote side: read the framed packet out of
 * the Rx buffer, hand the buffer back via "Rx Complete", then validate
 * the frame and deliver the packet to the core. Invalid lengths or a
 * failed allocation drop the frame without delivery.
 */
static void mctp_astlpc_rx_start(struct mctp_binding_astlpc *astlpc)
{
	struct mctp_pktbuf *pkt;
	struct mctp_hdr *hdr;
	uint32_t body, packet;

	/* The frame begins with a 4-byte big-endian body length */
	mctp_astlpc_lpc_read(astlpc, &body, astlpc->layout.rx.offset,
			     sizeof(body));
	body = be32toh(body);

	/* Reject lengths exceeding the negotiated buffer capacity */
	if (body > astlpc->proto->body_size(astlpc->layout.rx.size)) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
		return;
	}

	/* Reject lengths exceeding the bus packet size */
	if ((size_t)body > astlpc->binding.pkt_size) {
		astlpc_prwarn(astlpc, "invalid RX len 0x%x", body);
		return;
	}

	/* Eliminate the medium-specific header that we just read */
	packet = astlpc->proto->packet_size(body) - 4;
	pkt = mctp_pktbuf_alloc(&astlpc->binding, packet);
	if (!pkt) {
		astlpc_prwarn(astlpc, "unable to allocate pktbuf len 0x%x",
			      packet);
		return;
	}

	/*
	 * Read payload and medium-specific trailer from immediately after the
	 * medium-specific header.
	 */
	mctp_astlpc_lpc_read(astlpc, mctp_pktbuf_hdr(pkt),
			     astlpc->layout.rx.offset + 4, packet);

	astlpc->layout.rx.state = buffer_state_prepared;

	/* Inform the other side of the MCTP interface that we have read
	 * the packet off the bus before handling the contents of the packet.
	 */
	if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
		astlpc->layout.rx.state = buffer_state_released;

	/* Only MCTP header version 1 is supported */
	hdr = mctp_pktbuf_hdr(pkt);
	if (hdr->ver != 1) {
		mctp_pktbuf_free(pkt);
		astlpc_prdebug(astlpc, "Dropped packet with invalid version");
		return;
	}

	/*
	 * v3 will validate the CRC32 in the medium-specific trailer and adjust
	 * the packet size accordingly. On older protocols validation is a no-op
	 * that always returns true.
	 */
	if (astlpc->proto->pktbuf_validate(pkt)) {
		mctp_bus_rx(&astlpc->binding, pkt);
	} else {
		/* TODO: Drop any associated assembly */
		mctp_pktbuf_free(pkt);
		astlpc_prdebug(astlpc, "Dropped corrupt packet");
	}
}
1063
/*
 * Handle "Rx Complete" from the remote: the remote has consumed our
 * transmission, so we reacquire the Tx buffer and allow the core to
 * transmit again.
 */
static void mctp_astlpc_tx_complete(struct mctp_binding_astlpc *astlpc)
{
	astlpc->layout.tx.state = buffer_state_acquired;
	mctp_binding_set_tx_enabled(&astlpc->binding, true);
}
1069
mctp_astlpc_finalise_channel(struct mctp_binding_astlpc * astlpc)1070 static int mctp_astlpc_finalise_channel(struct mctp_binding_astlpc *astlpc)
1071 {
1072 struct mctp_astlpc_layout layout;
1073 uint16_t negotiated;
1074 int rc;
1075
1076 rc = mctp_astlpc_lpc_read(astlpc, &negotiated,
1077 offsetof(struct mctp_lpcmap_hdr,
1078 negotiated_ver),
1079 sizeof(negotiated));
1080 if (rc < 0)
1081 return rc;
1082
1083 negotiated = be16toh(negotiated);
1084 astlpc_prerr(astlpc, "Version negotiation got: %u", negotiated);
1085
1086 if (negotiated == ASTLPC_VER_BAD || negotiated < ASTLPC_VER_MIN ||
1087 negotiated > ASTLPC_VER_CUR) {
1088 astlpc_prerr(astlpc, "Failed to negotiate version, got: %u\n",
1089 negotiated);
1090 return -EINVAL;
1091 }
1092
1093 assert(negotiated < ARRAY_SIZE(astlpc_protocol_version));
1094 astlpc->proto = &astlpc_protocol_version[negotiated];
1095
1096 rc = mctp_astlpc_layout_read(astlpc, &layout);
1097 if (rc < 0)
1098 return rc;
1099
1100 if (!mctp_astlpc_layout_validate(astlpc, &layout)) {
1101 mctp_prerr("BMC proposed invalid buffer parameters");
1102 return -EINVAL;
1103 }
1104
1105 astlpc->layout = layout;
1106
1107 if (negotiated >= 2)
1108 astlpc->binding.pkt_size =
1109 astlpc->proto->body_size(astlpc->layout.tx.size);
1110
1111 /* Track buffer ownership */
1112 astlpc->layout.tx.state = buffer_state_acquired;
1113 astlpc->layout.rx.state = buffer_state_released;
1114
1115 return 0;
1116 }
1117
/*
 * Host-side reaction to a KCS status change.
 *
 * Diffs the new status byte against the cached kcs_status and acts on
 * the bits that flipped: loss of BMC_READY shuts the channel down,
 * while a CHANNEL_ACTIVE transition (or an as-yet unnegotiated
 * protocol, version 0) re-runs channel finalisation.
 *
 * Returns 0 on success, or a negative value from channel restart or
 * finalisation.
 */
static int mctp_astlpc_update_channel(struct mctp_binding_astlpc *astlpc,
				      uint8_t status)
{
	uint8_t updated;
	int rc = 0;

	/* Only the host reacts to BMC-driven status changes */
	assert(astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST);

	/* Bits that differ from the last status we observed */
	updated = astlpc->kcs_status ^ status;

	astlpc_prdebug(astlpc, "%s: status: 0x%x, update: 0x%x", __func__,
		       status, updated);

	if (updated & KCS_STATUS_BMC_READY) {
		if (status & KCS_STATUS_BMC_READY) {
			/* BMC came up: restart the binding from scratch */
			astlpc->kcs_status = status;
			return astlpc->binding.start(&astlpc->binding);
		} else {
			/* Shut down the channel */
			astlpc->layout.rx.state = buffer_state_idle;
			astlpc->layout.tx.state = buffer_state_idle;
			mctp_binding_set_tx_enabled(&astlpc->binding, false);
		}
	}

	if (astlpc->proto->version == 0 ||
	    updated & KCS_STATUS_CHANNEL_ACTIVE) {
		bool enable;

		/* Buffers are unowned until finalisation completes */
		astlpc->layout.rx.state = buffer_state_idle;
		astlpc->layout.tx.state = buffer_state_idle;
		rc = mctp_astlpc_finalise_channel(astlpc);
		enable = (status & KCS_STATUS_CHANNEL_ACTIVE) && rc == 0;
		mctp_binding_set_tx_enabled(&astlpc->binding, enable);
	}

	astlpc->kcs_status = status;

	return rc;
}
1158
/* Report whether the local side owns the Tx buffer and may transmit. */
bool mctp_astlpc_tx_done(struct mctp_binding_astlpc *astlpc)
{
	return astlpc->layout.tx.state == buffer_state_acquired;
}
1163
/*
 * Service the KCS interface: retry any deferred buffer-ownership
 * commands, then read and dispatch a pending command byte, if any.
 *
 * Returns 0 when there is nothing to do or the event was handled,
 * negative on a KCS access failure or a failed channel update.
 */
int mctp_astlpc_poll(struct mctp_binding_astlpc *astlpc)
{
	uint8_t status, data;
	int rc;

	/*
	 * Retry ownership-transfer commands that previously failed to send
	 * (see mctp_astlpc_rx_start()); a successful send (zero return)
	 * releases the corresponding buffer.
	 */
	if (astlpc->layout.rx.state == buffer_state_prepared)
		if (!mctp_astlpc_kcs_send(astlpc, cmd_rx_complete))
			astlpc->layout.rx.state = buffer_state_released;

	if (astlpc->layout.tx.state == buffer_state_prepared)
		if (!mctp_astlpc_kcs_send(astlpc, cmd_tx_begin))
			astlpc->layout.tx.state = buffer_state_released;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_STATUS, &status);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: status: 0x%hhx", __func__, status);

	/* Nothing queued in the data register: we're done */
	if (!mctp_astlpc_kcs_read_ready(astlpc, status))
		return 0;

	rc = mctp_astlpc_kcs_read(astlpc, MCTP_ASTLPC_KCS_REG_DATA, &data);
	if (rc) {
		astlpc_prwarn(astlpc, "KCS data read error");
		return -1;
	}

	astlpc_prdebug(astlpc, "%s: data: 0x%hhx", __func__, data);

	/*
	 * Before version negotiation (proto version 0), only "Initialise"
	 * and the dummy byte are legal commands.
	 */
	if (!astlpc->proto->version &&
	    !(data == cmd_initialise || data == cmd_dummy_value)) {
		astlpc_prwarn(astlpc, "Invalid message for binding state: 0x%x",
			      data);
		return 0;
	}

	switch (data) {
	case cmd_initialise:
		mctp_astlpc_init_channel(astlpc);
		break;
	case cmd_tx_begin:
		/* Remote may only hand us an Rx buffer we had released */
		if (astlpc->layout.rx.state != buffer_state_released) {
			astlpc_prerr(
				astlpc,
				"Protocol error: Invalid Rx buffer state for event %d: %d\n",
				data, astlpc->layout.rx.state);
			return 0;
		}
		mctp_astlpc_rx_start(astlpc);
		break;
	case cmd_rx_complete:
		/* Remote may only return a Tx buffer we had released */
		if (astlpc->layout.tx.state != buffer_state_released) {
			astlpc_prerr(
				astlpc,
				"Protocol error: Invalid Tx buffer state for event %d: %d\n",
				data, astlpc->layout.tx.state);
			return 0;
		}
		mctp_astlpc_tx_complete(astlpc);
		break;
	case cmd_dummy_value:
		/* No responsibilities for the BMC on 0xff */
		if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
			rc = mctp_astlpc_update_channel(astlpc, status);
			if (rc < 0)
				return rc;
		}
		break;
	default:
		astlpc_prwarn(astlpc, "unknown message 0x%x", data);
	}

	/* Handle silent loss of bmc-ready */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_HOST) {
		if (!(status & KCS_STATUS_BMC_READY && data == cmd_dummy_value))
			return mctp_astlpc_update_channel(astlpc, status);
	}

	return rc;
}
1247
1248 /* allocate and basic initialisation */
__mctp_astlpc_init(uint8_t mode,uint32_t mtu)1249 static struct mctp_binding_astlpc *__mctp_astlpc_init(uint8_t mode,
1250 uint32_t mtu)
1251 {
1252 struct mctp_binding_astlpc *astlpc;
1253
1254 assert((mode == MCTP_BINDING_ASTLPC_MODE_BMC) ||
1255 (mode == MCTP_BINDING_ASTLPC_MODE_HOST));
1256
1257 astlpc = __mctp_alloc(sizeof(*astlpc));
1258 if (!astlpc)
1259 return NULL;
1260
1261 memset(astlpc, 0, sizeof(*astlpc));
1262 astlpc->mode = mode;
1263 astlpc->lpc_map = NULL;
1264 astlpc->layout.rx.state = buffer_state_idle;
1265 astlpc->layout.tx.state = buffer_state_idle;
1266 astlpc->requested_mtu = mtu;
1267 astlpc->binding.name = "astlpc";
1268 astlpc->binding.version = 1;
1269 astlpc->binding.pkt_size =
1270 MCTP_PACKET_SIZE(mtu > MCTP_BTU ? mtu : MCTP_BTU);
1271 astlpc->binding.pkt_header = 4;
1272 astlpc->binding.pkt_trailer = 4;
1273 astlpc->binding.tx = mctp_binding_astlpc_tx;
1274 if (mode == MCTP_BINDING_ASTLPC_MODE_BMC)
1275 astlpc->binding.start = mctp_binding_astlpc_start_bmc;
1276 else if (mode == MCTP_BINDING_ASTLPC_MODE_HOST)
1277 astlpc->binding.start = mctp_binding_astlpc_start_host;
1278 else {
1279 astlpc_prerr(astlpc, "%s: Invalid mode: %d\n", __func__, mode);
1280 __mctp_free(astlpc);
1281 return NULL;
1282 }
1283
1284 return astlpc;
1285 }
1286
/* Expose the generic MCTP binding object embedded in the astlpc binding. */
struct mctp_binding *mctp_binding_astlpc_core(struct mctp_binding_astlpc *b)
{
	return &b->binding;
}
1291
1292 struct mctp_binding_astlpc *
mctp_astlpc_init(uint8_t mode,uint32_t mtu,void * lpc_map,const struct mctp_binding_astlpc_ops * ops,void * ops_data)1293 mctp_astlpc_init(uint8_t mode, uint32_t mtu, void *lpc_map,
1294 const struct mctp_binding_astlpc_ops *ops, void *ops_data)
1295 {
1296 struct mctp_binding_astlpc *astlpc;
1297
1298 if (!(mode == MCTP_BINDING_ASTLPC_MODE_BMC ||
1299 mode == MCTP_BINDING_ASTLPC_MODE_HOST)) {
1300 mctp_prerr("Unknown binding mode: %u", mode);
1301 return NULL;
1302 }
1303
1304 astlpc = __mctp_astlpc_init(mode, mtu);
1305 if (!astlpc)
1306 return NULL;
1307
1308 memcpy(&astlpc->ops, ops, sizeof(astlpc->ops));
1309 astlpc->ops_data = ops_data;
1310 astlpc->lpc_map = lpc_map;
1311 astlpc->mode = mode;
1312
1313 return astlpc;
1314 }
1315
/*
 * Legacy constructor: BMC mode with the baseline MTU (MCTP_BTU).
 * Thin wrapper around mctp_astlpc_init().
 */
struct mctp_binding_astlpc *
mctp_astlpc_init_ops(const struct mctp_binding_astlpc_ops *ops, void *ops_data,
		     void *lpc_map)
{
	return mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, MCTP_BTU, lpc_map,
				ops, ops_data);
}
1323
/*
 * Tear down the binding. On the BMC side, zero the KCS status byte so
 * the host observes the channel going away, then free the structure.
 */
void mctp_astlpc_destroy(struct mctp_binding_astlpc *astlpc)
{
	/* Clear channel-active and bmc-ready */
	if (astlpc->mode == MCTP_BINDING_ASTLPC_MODE_BMC)
		mctp_astlpc_kcs_set_status(astlpc, 0);
	__mctp_free(astlpc);
}
1331
1332 #ifdef MCTP_HAVE_FILEIO
1333
/*
 * Map the LPC FW window via the aspeed-lpc-ctrl driver: configure the
 * FW2AHB bridge to target the reserved memory region, mmap() that
 * region, and point astlpc->lpc_map at the top LPC_WIN_SIZE bytes.
 *
 * Returns 0 on success, -1 on open/ioctl/mmap failure.
 */
static int mctp_astlpc_init_fileio_lpc(struct mctp_binding_astlpc *astlpc)
{
	struct aspeed_lpc_ctrl_mapping map = {
		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
		.window_id = 0, /* There's only one */
		.flags = 0,
		.addr = 0,
		.offset = 0,
		.size = 0
	};
	void *lpc_map_base;
	int fd, rc;

	fd = open(lpc_path, O_RDWR | O_SYNC);
	if (fd < 0) {
		astlpc_prwarn(astlpc, "LPC open (%s) failed", lpc_path);
		return -1;
	}

	/* Learn the size of the reserved-memory window from the driver */
	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map);
	if (rc) {
		astlpc_prwarn(astlpc, "LPC GET_SIZE failed");
		close(fd);
		return -1;
	}

	/*
	 * Decouple ourselves from hiomapd[1] (another user of the FW2AHB) by
	 * mapping the FW2AHB to the reserved memory here as well.
	 *
	 * It's not possible to use the MCTP ASTLPC binding on machines that
	 * need the FW2AHB bridge mapped anywhere except to the reserved memory
	 * (e.g. the host SPI NOR).
	 *
	 * [1] https://github.com/openbmc/hiomapd/
	 *
	 * The following calculation must align with what's going on in
	 * hiomapd's lpc.c so as not to disrupt its behaviour:
	 *
	 * https://github.com/openbmc/hiomapd/blob/5ff50e3cbd7702aefc185264e4adfb9952040575/lpc.c#L68
	 */

	/* Map the reserved memory at the top of the 28-bit LPC firmware address space */
	map.addr = 0x0FFFFFFF & -map.size;
	astlpc_prinfo(
		astlpc,
		"Configuring FW2AHB to map reserved memory at 0x%08x for 0x%x in the LPC FW cycle address-space",
		map.addr, map.size);

	rc = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
	if (rc) {
		astlpc_prwarn(astlpc,
			      "Failed to map FW2AHB to reserved memory");
		close(fd);
		return -1;
	}

	/* Map the reserved memory into our address space */
	lpc_map_base =
		mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (lpc_map_base == MAP_FAILED) {
		astlpc_prwarn(astlpc, "LPC mmap failed");
		rc = -1;
	} else {
		/* The MCTP window occupies the top LPC_WIN_SIZE bytes */
		astlpc->lpc_map = lpc_map_base + map.size - LPC_WIN_SIZE;
	}

	/* The mapping persists after close; we no longer need the fd */
	close(fd);

	return rc;
}
1411
/*
 * Open the KCS character device backing the status/data registers and
 * store the fd in astlpc->kcs_fd. Returns 0 on success, -1 on failure.
 */
static int mctp_astlpc_init_fileio_kcs(struct mctp_binding_astlpc *astlpc,
				       const char *kcs_path)
{
	astlpc->kcs_fd = open(kcs_path, O_RDWR);
	if (astlpc->kcs_fd < 0)
		return -1;

	return 0;
}
1421
__mctp_astlpc_fileio_kcs_read(void * arg,enum mctp_binding_astlpc_kcs_reg reg,uint8_t * val)1422 static int __mctp_astlpc_fileio_kcs_read(void *arg,
1423 enum mctp_binding_astlpc_kcs_reg reg,
1424 uint8_t *val)
1425 {
1426 struct mctp_binding_astlpc *astlpc = arg;
1427 off_t offset = reg;
1428 int rc;
1429
1430 rc = pread(astlpc->kcs_fd, val, 1, offset);
1431
1432 return rc == 1 ? 0 : -1;
1433 }
1434
__mctp_astlpc_fileio_kcs_write(void * arg,enum mctp_binding_astlpc_kcs_reg reg,uint8_t val)1435 static int __mctp_astlpc_fileio_kcs_write(void *arg,
1436 enum mctp_binding_astlpc_kcs_reg reg,
1437 uint8_t val)
1438 {
1439 struct mctp_binding_astlpc *astlpc = arg;
1440 off_t offset = reg;
1441 int rc;
1442
1443 rc = pwrite(astlpc->kcs_fd, &val, 1, offset);
1444
1445 return rc == 1 ? 0 : -1;
1446 }
1447
mctp_astlpc_init_pollfd(struct mctp_binding_astlpc * astlpc,struct pollfd * pollfd)1448 int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc,
1449 struct pollfd *pollfd)
1450 {
1451 bool release;
1452
1453 pollfd->fd = astlpc->kcs_fd;
1454 pollfd->events = 0;
1455
1456 release = astlpc->layout.rx.state == buffer_state_prepared ||
1457 astlpc->layout.tx.state == buffer_state_prepared;
1458
1459 pollfd->events = release ? POLLOUT : POLLIN;
1460
1461 return 0;
1462 }
1463
mctp_astlpc_init_fileio(const char * kcs_path)1464 struct mctp_binding_astlpc *mctp_astlpc_init_fileio(const char *kcs_path)
1465 {
1466 struct mctp_binding_astlpc *astlpc;
1467 int rc;
1468
1469 /*
1470 * If we're doing file IO then we're very likely not running
1471 * freestanding, so lets assume that we're on the BMC side.
1472 *
1473 * Requesting an MTU of 0 requests the largest possible MTU, whatever
1474 * value that might take.
1475 */
1476 astlpc = __mctp_astlpc_init(MCTP_BINDING_ASTLPC_MODE_BMC, 0);
1477 if (!astlpc)
1478 return NULL;
1479
1480 /* Set internal operations for kcs. We use direct accesses to the lpc
1481 * map area */
1482 astlpc->ops.kcs_read = __mctp_astlpc_fileio_kcs_read;
1483 astlpc->ops.kcs_write = __mctp_astlpc_fileio_kcs_write;
1484 astlpc->ops_data = astlpc;
1485
1486 rc = mctp_astlpc_init_fileio_lpc(astlpc);
1487 if (rc) {
1488 free(astlpc);
1489 return NULL;
1490 }
1491
1492 rc = mctp_astlpc_init_fileio_kcs(astlpc, kcs_path);
1493 if (rc) {
1494 free(astlpc);
1495 return NULL;
1496 }
1497
1498 return astlpc;
1499 }
1500 #else
/* Stub for builds without file-IO support: always fails with NULL. */
struct mctp_binding_astlpc *
mctp_astlpc_init_fileio(const char *kcs_path __unused)
{
	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
	return NULL;
}
1507
/* Stub for builds without file-IO support: always fails with -1. */
int mctp_astlpc_init_pollfd(struct mctp_binding_astlpc *astlpc __unused,
			    struct pollfd *pollfd __unused)
{
	mctp_prlog(MCTP_LOG_ERR, "%s: Missing support for file IO", __func__);
	return -1;
}
1514 #endif
1515