1 // SPDX-License-Identifier: Apache-2.0 2 // Copyright (C) 2018 IBM Corp. 3 4 #define _GNU_SOURCE 5 #include <assert.h> 6 #include <errno.h> 7 #include <fcntl.h> 8 #include <getopt.h> 9 #include <limits.h> 10 #include <poll.h> 11 #include <stdbool.h> 12 #include <stdint.h> 13 #include <stdio.h> 14 #include <stdlib.h> 15 #include <string.h> 16 #include <syslog.h> 17 #include <signal.h> 18 #include <sys/ioctl.h> 19 #include <sys/mman.h> 20 #include <sys/stat.h> 21 #include <sys/timerfd.h> 22 #include <sys/types.h> 23 #include <time.h> 24 #include <unistd.h> 25 #include <inttypes.h> 26 27 #include "mboxd.h" 28 #include "common.h" 29 #include "transport_mbox.h" 30 #include "windows.h" 31 #include "lpc.h" 32 33 #pragma GCC diagnostic push 34 #pragma GCC diagnostic ignored "-Wpointer-arith" 35 36 struct errno_map { 37 int rc; 38 int mbox_errno; 39 }; 40 41 static const struct errno_map errno_map_v1[] = { 42 { 0, MBOX_R_SUCCESS }, 43 { EACCES, MBOX_R_PARAM_ERROR }, 44 { EBADMSG, MBOX_R_PARAM_ERROR }, 45 { EBUSY, MBOX_R_SYSTEM_ERROR }, 46 { EINVAL, MBOX_R_PARAM_ERROR }, 47 { ENOTSUP, MBOX_R_PARAM_ERROR }, 48 { EPERM, MBOX_R_PARAM_ERROR }, 49 { EPROTO, MBOX_R_PARAM_ERROR }, 50 { ETIMEDOUT, MBOX_R_TIMEOUT }, 51 { -1, MBOX_R_SYSTEM_ERROR }, 52 }; 53 54 static const struct errno_map errno_map_v2[] = { 55 { 0, MBOX_R_SUCCESS }, 56 { EACCES, MBOX_R_WINDOW_ERROR }, 57 { EBADMSG, MBOX_R_SEQ_ERROR }, 58 { EBUSY, MBOX_R_BUSY }, 59 { EINVAL, MBOX_R_PARAM_ERROR }, 60 { ENOTSUP, MBOX_R_PARAM_ERROR }, 61 { EPERM, MBOX_R_WINDOW_ERROR }, 62 { EPROTO, MBOX_R_PARAM_ERROR }, 63 { ETIMEDOUT, MBOX_R_TIMEOUT }, 64 { -1, MBOX_R_SYSTEM_ERROR }, 65 }; 66 67 static const struct errno_map *errno_maps[] = { 68 [0] = NULL, 69 [1] = errno_map_v1, 70 [2] = errno_map_v2, 71 }; 72 73 static inline int mbox_xlate_errno(struct mbox_context *context, 74 int rc) 75 { 76 const struct errno_map *entry; 77 78 rc = -rc; 79 MSG_DBG("Translating errno %d: %s\n", rc, strerror(rc)); 80 for(entry = errno_maps[context->version]; entry->rc != -1; entry++) { 81 if (rc == entry->rc) { 82 return entry->mbox_errno; 83 } 84 } 85 86 return entry->mbox_errno; 87 } 88 89 /* 90 * transport_mbox_flush_events() - Write to the BMC controlled status register 91 * (reg 15) 92 * @context: The mbox context pointer 93 * 94 * Return: 0 on success otherwise negative error code 95 */ 96 static int transport_mbox_flush_events(struct mbox_context *context, uint8_t events) 97 { 98 int rc; 99 100 /* Seek mbox registers */ 101 rc = lseek(context->fds[MBOX_FD].fd, MBOX_BMC_EVENT, SEEK_SET); 102 if (rc != MBOX_BMC_EVENT) { 103 MSG_ERR("Couldn't lseek mbox to byte %d: %s\n", MBOX_BMC_EVENT, 104 strerror(errno)); 105 return -errno; 106 } 107 108 /* Write to mbox status register */ 109 rc = write(context->fds[MBOX_FD].fd, &events, 1); 110 if (rc != 1) { 111 MSG_ERR("Couldn't write to BMC status reg: %s\n", 112 strerror(errno)); 113 return -errno; 114 } 115 116 /* Reset to start */ 117 rc = lseek(context->fds[MBOX_FD].fd, 0, SEEK_SET); 118 if (rc) { 119 MSG_ERR("Couldn't reset MBOX offset to zero: %s\n", 120 strerror(errno)); 121 return -errno; 122 } 123 124 return 0; 125 } 126 127 static int transport_mbox_put_events(struct mbox_context *context, 128 uint8_t mask) 129 { 130 return transport_mbox_flush_events(context, context->bmc_events & mask); 131 } 132 133 static int transport_mbox_update_events(struct mbox_context *context, 134 uint8_t events __attribute__((unused)), 135 uint8_t mask) 136 { 137 return transport_mbox_flush_events(context, context->bmc_events & mask); 138 } 139 140 static const struct transport_ops transport_mbox_ops = { 141 .put_events = transport_mbox_put_events, 142 .set_events = transport_mbox_update_events, 143 .clear_events = transport_mbox_update_events, 144 }; 145 146 /* Command Handlers */ 147 148 /* 149 * Command: RESET_STATE 150 * Reset the LPC mapping to point back at the flash, or memory in case we're 151 * using a virtual pnor. 152 */ 153 static int mbox_handle_reset(struct mbox_context *context, 154 union mbox_regs *req __attribute__((unused)), 155 struct mbox_msg *resp __attribute__((unused))) 156 { 157 return context->protocol->reset(context); 158 } 159 160 /* 161 * Command: GET_MBOX_INFO 162 * Get the API version, default window size and block size 163 * We also set the LPC mapping to point to the reserved memory region here so 164 * this command must be called before any window manipulation 165 * 166 * V1: 167 * ARGS[0]: API Version 168 * 169 * RESP[0]: API Version 170 * RESP[1:2]: Default read window size (number of blocks) 171 * RESP[3:4]: Default write window size (number of blocks) 172 * RESP[5]: Block size (as shift) 173 * 174 * V2: 175 * ARGS[0]: API Version 176 * 177 * RESP[0]: API Version 178 * RESP[1:2]: Default read window size (number of blocks) 179 * RESP[3:4]: Default write window size (number of blocks) 180 * RESP[5]: Block size (as shift) 181 */ 182 static int mbox_handle_mbox_info(struct mbox_context *context, 183 union mbox_regs *req, struct mbox_msg *resp) 184 { 185 uint8_t mbox_api_version = req->msg.args[0]; 186 struct protocol_get_info io = { 187 .req = { .api_version = mbox_api_version } 188 }; 189 int rc; 190 191 rc = context->protocol->get_info(context, &io); 192 if (rc < 0) { 193 return rc; 194 } 195 196 /* 197 * Switch transport to mbox, however we need to delay flushing the 198 * event state until after the command is processed. 199 */ 200 context->transport = &transport_mbox_ops; 201 202 resp->args[0] = io.resp.api_version; 203 if (io.resp.api_version == API_VERSION_1) { 204 put_u16(&resp->args[1], io.resp.v1.read_window_size); 205 put_u16(&resp->args[3], io.resp.v1.write_window_size); 206 } else if (io.resp.api_version >= API_VERSION_2) { 207 resp->args[5] = io.resp.v2.block_size_shift; 208 put_u16(&resp->args[6], io.resp.v2.timeout); 209 } 210 211 return 0; 212 } 213 214 /* 215 * Command: GET_FLASH_INFO 216 * Get the flash size and erase granularity 217 * 218 * V1: 219 * RESP[0:3]: Flash Size (bytes) 220 * RESP[4:7]: Erase Size (bytes) 221 * V2: 222 * RESP[0:1]: Flash Size (number of blocks) 223 * RESP[2:3]: Erase Size (number of blocks) 224 */ 225 static int mbox_handle_flash_info(struct mbox_context *context, 226 union mbox_regs *req __attribute__((unused)), 227 struct mbox_msg *resp) 228 { 229 struct protocol_get_flash_info io; 230 int rc; 231 232 rc = context->protocol->get_flash_info(context, &io); 233 if (rc < 0) { 234 return rc; 235 } 236 237 switch (context->version) { 238 case API_VERSION_1: 239 /* Both Sizes in Bytes */ 240 put_u32(&resp->args[0], io.resp.v1.flash_size); 241 put_u32(&resp->args[4], io.resp.v1.erase_size); 242 break; 243 case API_VERSION_2: 244 /* Both Sizes in Block Size */ 245 put_u16(&resp->args[0], io.resp.v2.flash_size); 246 put_u16(&resp->args[2], io.resp.v2.erase_size); 247 break; 248 default: 249 MSG_ERR("API Version Not Valid - Invalid System State\n"); 250 return -MBOX_R_SYSTEM_ERROR; 251 } 252 253 return 0; 254 } 255 256 /* 257 * get_lpc_addr_shifted() - Get lpc address of the current window 258 * @context: The mbox context pointer 259 * 260 * Return: The lpc address to access that offset shifted by block size 261 */ 262 static inline uint16_t get_lpc_addr_shifted(struct mbox_context *context) 263 { 264 uint32_t lpc_addr, mem_offset; 265 266 /* Offset of the current window in the reserved memory region */ 267 mem_offset = context->current->mem - context->mem; 268 /* Total LPC Address */ 269 lpc_addr = context->lpc_base + mem_offset; 270 271 MSG_DBG("LPC address of current window: 0x%.8x\n", lpc_addr); 272 273 return lpc_addr >> context->backend.block_size_shift; 274 } 275 276 static int mbox_handle_create_window(struct mbox_context *context, bool ro, 277 union mbox_regs *req, struct mbox_msg *resp) 278 { 279 struct protocol_create_window io; 280 int rc; 281 282 io.req.offset = get_u16(&req->msg.args[0]); 283 io.req.ro = ro; 284 285 rc = context->protocol->create_window(context, &io); 286 if (rc < 0) { 287 return rc; 288 } 289 290 put_u16(&resp->args[0], io.resp.lpc_address); 291 if (context->version >= API_VERSION_2) { 292 put_u16(&resp->args[2], io.resp.size); 293 put_u16(&resp->args[4], io.resp.offset); 294 } 295 296 return 0; 297 } 298 299 /* 300 * Command: CREATE_READ_WINDOW 301 * Opens a read window 302 * First checks if any current window with the requested data, if so we just 303 * point the host to that. Otherwise we read the request data in from flash and 304 * point the host there. 305 * 306 * V1: 307 * ARGS[0:1]: Window Location as Offset into Flash (number of blocks) 308 * 309 * RESP[0:1]: LPC bus address for host to access this window (number of blocks) 310 * 311 * V2: 312 * ARGS[0:1]: Window Location as Offset into Flash (number of blocks) 313 * ARGS[2:3]: Requested window size (number of blocks) 314 * 315 * RESP[0:1]: LPC bus address for host to access this window (number of blocks) 316 * RESP[2:3]: Actual window size that the host can access (number of blocks) 317 */ 318 static int mbox_handle_read_window(struct mbox_context *context, 319 union mbox_regs *req, struct mbox_msg *resp) 320 { 321 return mbox_handle_create_window(context, true, req, resp); 322 } 323 324 /* 325 * Command: CREATE_WRITE_WINDOW 326 * Opens a write window 327 * First checks if any current window with the requested data, if so we just 328 * point the host to that. Otherwise we read the request data in from flash and 329 * point the host there. 330 * 331 * V1: 332 * ARGS[0:1]: Window Location as Offset into Flash (number of blocks) 333 * 334 * RESP[0:1]: LPC bus address for host to access this window (number of blocks) 335 * 336 * V2: 337 * ARGS[0:1]: Window Location as Offset into Flash (number of blocks) 338 * ARGS[2:3]: Requested window size (number of blocks) 339 * 340 * RESP[0:1]: LPC bus address for host to access this window (number of blocks) 341 * RESP[2:3]: Actual window size that was mapped/host can access (n.o. blocks) 342 */ 343 static int mbox_handle_write_window(struct mbox_context *context, 344 union mbox_regs *req, struct mbox_msg *resp) 345 { 346 return mbox_handle_create_window(context, false, req, resp); 347 } 348 349 /* 350 * Commands: MARK_WRITE_DIRTY 351 * Marks a portion of the current (write) window dirty, informing the daemon 352 * that is has been written to and thus must be at some point written to the 353 * backing store 354 * These changes aren't written back to the backing store unless flush is then 355 * called or the window closed 356 * 357 * V1: 358 * ARGS[0:1]: Where within flash to start (number of blocks) 359 * ARGS[2:5]: Number to mark dirty (number of bytes) 360 * 361 * V2: 362 * ARGS[0:1]: Where within window to start (number of blocks) 363 * ARGS[2:3]: Number to mark dirty (number of blocks) 364 */ 365 static int mbox_handle_dirty_window(struct mbox_context *context, 366 union mbox_regs *req, 367 struct mbox_msg *resp __attribute__((unused))) 368 { 369 struct protocol_mark_dirty io; 370 371 if (context->version == API_VERSION_1) { 372 io.req.v1.offset = get_u16(&req->msg.args[0]); 373 io.req.v1.size = get_u32(&req->msg.args[2]); 374 } else { 375 io.req.v2.offset = get_u16(&req->msg.args[0]); 376 io.req.v2.size = get_u16(&req->msg.args[2]); 377 } 378 379 return context->protocol->mark_dirty(context, &io); 380 } 381 382 /* 383 * Commands: MARK_WRITE_ERASE 384 * Erases a portion of the current window 385 * These changes aren't written back to the backing store unless flush is then 386 * called or the window closed 387 * 388 * V1: 389 * Unimplemented 390 * 391 * V2: 392 * ARGS[0:1]: Where within window to start (number of blocks) 393 * ARGS[2:3]: Number to erase (number of blocks) 394 */ 395 static int mbox_handle_erase_window(struct mbox_context *context, 396 union mbox_regs *req, 397 struct mbox_msg *resp __attribute__((unused))) 398 { 399 struct protocol_erase io; 400 401 io.req.offset = get_u16(&req->msg.args[0]); 402 io.req.size = get_u16(&req->msg.args[2]); 403 404 if (!context->protocol->erase) { 405 MSG_ERR("Protocol Version invalid for Erase Command\n"); 406 return -ENOTSUP; 407 } 408 409 return context->protocol->erase(context, &io); 410 } 411 412 /* 413 * Command: WRITE_FLUSH 414 * Flushes any dirty or erased blocks in the current window back to the backing 415 * store 416 * NOTE: For V1 this behaves much the same as the dirty command in that it 417 * takes an offset and number of blocks to dirty, then also performs a flush as 418 * part of the same command. For V2 this will only flush blocks already marked 419 * dirty/erased with the appropriate commands and doesn't take any arguments 420 * directly. 421 * 422 * V1: 423 * ARGS[0:1]: Where within window to start (number of blocks) 424 * ARGS[2:5]: Number to mark dirty (number of bytes) 425 * 426 * V2: 427 * NONE 428 */ 429 static int mbox_handle_flush_window(struct mbox_context *context, 430 union mbox_regs *req, 431 struct mbox_msg *resp __attribute__((unused))) 432 { 433 struct protocol_flush io = { 0 }; 434 435 if (context->version == API_VERSION_1) { 436 io.req.offset = get_u16(&req->msg.args[0]); 437 io.req.size = get_u32(&req->msg.args[2]); 438 } 439 440 return context->protocol->flush(context, &io); 441 } 442 443 /* 444 * Command: CLOSE_WINDOW 445 * Close the current window 446 * NOTE: There is an implicit flush 447 * 448 * V1: 449 * NONE 450 * 451 * V2: 452 * ARGS[0]: FLAGS 453 */ 454 static int mbox_handle_close_window(struct mbox_context *context, 455 union mbox_regs *req, 456 struct mbox_msg *resp __attribute__((unused))) 457 { 458 struct protocol_close io = { 0 }; 459 460 if (context->version >= API_VERSION_2) { 461 io.req.flags = req->msg.args[0]; 462 } 463 464 return context->protocol->close(context, &io); 465 } 466 467 /* 468 * Command: BMC_EVENT_ACK 469 * Sent by the host to acknowledge BMC events supplied in mailbox register 15 470 * 471 * ARGS[0]: Bitmap of bits to ack (by clearing) 472 */ 473 static int mbox_handle_ack(struct mbox_context *context, union mbox_regs *req, 474 struct mbox_msg *resp __attribute__((unused))) 475 { 476 struct protocol_ack io; 477 478 io.req.flags = req->msg.args[0]; 479 480 return context->protocol->ack(context, &io); 481 } 482 483 /* 484 * check_req_valid() - Check if the given request is a valid mbox request 485 * @context: The mbox context pointer 486 * @cmd: The request registers 487 * 488 * Return: 0 if request is valid otherwise negative error code 489 */ 490 static int check_req_valid(struct mbox_context *context, union mbox_regs *req) 491 { 492 uint8_t cmd = req->msg.command; 493 uint8_t seq = req->msg.seq; 494 495 if (cmd > NUM_MBOX_CMDS) { 496 MSG_ERR("Unknown mbox command: %d\n", cmd); 497 return -ENOTSUP; 498 } 499 500 if (seq == context->prev_seq && cmd != MBOX_C_GET_MBOX_INFO) { 501 MSG_ERR("Invalid sequence number: %d, previous: %d\n", seq, 502 context->prev_seq); 503 return -EBADMSG; 504 } 505 506 if (context->state & STATE_SUSPENDED) { 507 if (cmd != MBOX_C_GET_MBOX_INFO && cmd != MBOX_C_ACK) { 508 MSG_ERR("Cannot use that cmd while suspended: %d\n", 509 cmd); 510 return -EBUSY; 511 } 512 } 513 514 if (context->transport != &transport_mbox_ops) { 515 if (cmd != MBOX_C_RESET_STATE && cmd != MBOX_C_GET_MBOX_INFO) { 516 MSG_ERR("Cannot switch transport with command %d\n", 517 cmd); 518 return -EPROTO; 519 } 520 } 521 522 if (!(context->state & MAPS_MEM)) { 523 if (cmd != MBOX_C_RESET_STATE && cmd != MBOX_C_GET_MBOX_INFO 524 && cmd != MBOX_C_ACK) { 525 MSG_ERR("Must call GET_MBOX_INFO before %d\n", cmd); 526 return -EPROTO; 527 } 528 } 529 530 return 0; 531 } 532 533 typedef int (*mboxd_mbox_handler)(struct mbox_context *, union mbox_regs *, 534 struct mbox_msg *); 535 536 static const mboxd_mbox_handler transport_mbox_handlers[] = { 537 mbox_handle_reset, 538 mbox_handle_mbox_info, 539 mbox_handle_flash_info, 540 mbox_handle_read_window, 541 mbox_handle_close_window, 542 mbox_handle_write_window, 543 mbox_handle_dirty_window, 544 mbox_handle_flush_window, 545 mbox_handle_ack, 546 mbox_handle_erase_window 547 }; 548 549 /* 550 * handle_mbox_req() - Handle an incoming mbox command request 551 * @context: The mbox context pointer 552 * @req: The mbox request message 553 * 554 * Return: 0 if handled successfully otherwise negative error code 555 */ 556 static int handle_mbox_req(struct mbox_context *context, union mbox_regs *req) 557 { 558 const struct transport_ops *old_transport = context->transport; 559 struct mbox_msg resp = { 560 .command = req->msg.command, 561 .seq = req->msg.seq, 562 .args = { 0 }, 563 .response = MBOX_R_SUCCESS 564 }; 565 int rc = 0, len, i; 566 567 MSG_INFO("Received MBOX command: %u\n", req->msg.command); 568 569 rc = check_req_valid(context, req); 570 if (!rc) { 571 mboxd_mbox_handler handler; 572 573 /* Commands start at 1 so we have to subtract 1 from the cmd */ 574 handler = transport_mbox_handlers[req->msg.command - 1]; 575 rc = handler(context, req, &resp); 576 if (rc < 0) { 577 MSG_ERR("Error handling mbox cmd: %d\n", 578 req->msg.command); 579 } 580 } 581 582 rc = mbox_xlate_errno(context, rc); 583 resp.response = rc; 584 context->prev_seq = req->msg.seq; 585 586 MSG_DBG("Writing MBOX response:\n"); 587 MSG_DBG("MBOX cmd: %u\n", resp.command); 588 MSG_DBG("MBOX seq: %u\n", resp.seq); 589 for (i = 0; i < MBOX_ARGS_BYTES; i++) { 590 MSG_DBG("MBOX arg[%d]: 0x%.2x\n", i, resp.args[i]); 591 } 592 MSG_INFO("Writing MBOX response: %u\n", resp.response); 593 len = write(context->fds[MBOX_FD].fd, &resp, sizeof(resp)); 594 if (len < (ssize_t)sizeof(resp)) { 595 MSG_ERR("Didn't write the full response\n"); 596 rc = -errno; 597 } 598 599 if (context->transport != old_transport && 600 context->transport == &transport_mbox_ops) { 601 /* A bit messy, but we need the correct event mask */ 602 protocol_events_set(context, context->bmc_events); 603 } 604 605 return rc; 606 } 607 608 /* 609 * get_message() - Read an mbox request message from the mbox registers 610 * @context: The mbox context pointer 611 * @msg: Where to put the received message 612 * 613 * Return: 0 if read successfully otherwise negative error code 614 */ 615 static int get_message(struct mbox_context *context, union mbox_regs *msg) 616 { 617 int rc, i; 618 619 rc = read(context->fds[MBOX_FD].fd, msg, sizeof(msg->raw)); 620 if (rc < 0) { 621 MSG_ERR("Couldn't read: %s\n", strerror(errno)); 622 return -errno; 623 } else if (rc < (ssize_t)sizeof(msg->raw)) { 624 MSG_ERR("Short read: %d expecting %zu\n", rc, sizeof(msg->raw)); 625 return -1; 626 } 627 628 MSG_DBG("Received MBOX request:\n"); 629 MSG_DBG("MBOX cmd: %u\n", msg->msg.command); 630 MSG_DBG("MBOX seq: %u\n", msg->msg.seq); 631 for (i = 0; i < MBOX_ARGS_BYTES; i++) { 632 MSG_DBG("MBOX arg[%d]: 0x%.2x\n", i, msg->msg.args[i]); 633 } 634 635 return 0; 636 } 637 638 /* 639 * transport_mbox_dispatch() - handle an mbox interrupt 640 * @context: The mbox context pointer 641 * 642 * Return: 0 if handled successfully otherwise negative error code 643 */ 644 int transport_mbox_dispatch(struct mbox_context *context) 645 { 646 int rc = 0; 647 union mbox_regs req = { 0 }; 648 649 assert(context); 650 651 rc = get_message(context, &req); 652 if (rc) { 653 return rc; 654 } 655 656 return handle_mbox_req(context, &req); 657 } 658 659 int __transport_mbox_init(struct mbox_context *context, const char *path, 660 const struct transport_ops **ops) 661 { 662 int fd; 663 664 /* Open MBOX Device */ 665 fd = open(path, O_RDWR | O_NONBLOCK); 666 if (fd < 0) { 667 MSG_INFO("Couldn't open %s with flags O_RDWR: %s\n", 668 path, strerror(errno)); 669 return -errno; 670 } 671 MSG_DBG("Opened mbox dev: %s\n", path); 672 673 context->fds[MBOX_FD].fd = fd; 674 675 if (ops) { 676 *ops = &transport_mbox_ops; 677 } 678 679 return 0; 680 } 681 682 int transport_mbox_init(struct mbox_context *context, 683 const struct transport_ops **ops) 684 { 685 int rc; 686 687 rc = __transport_mbox_init(context, MBOX_HOST_PATH, ops); 688 if (rc) 689 return rc; 690 691 return 0; 692 } 693 694 void transport_mbox_free(struct mbox_context *context) 695 { 696 close(context->fds[MBOX_FD].fd); 697 } 698 699 #pragma GCC diagnostic pop 700