// SPDX-License-Identifier: Apache-2.0
// Copyright (C) 2018 IBM Corp.
#include "config.h"

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <linux/blktrace_api.h>

#include "backend.h"
#include "common.h"
#include "lpc.h"
#include "mboxd.h"
#include "protocol.h"
#include "windows.h"

#define BLOCK_SIZE_SHIFT_V1	12 /* 4K */

static inline uint8_t protocol_get_bmc_event_mask(struct mbox_context *context)
{
	if (context->version == API_VERSION_1) {
		return BMC_EVENT_V1_MASK;
	}

	return BMC_EVENT_V2_MASK;
}

/*
 * protocol_events_put() - Push the full set/cleared state of BMC events on the
 *			   provided transport
 * @context:	The mbox context pointer
 * @ops:	The operations struct for the transport of interest
 *
 * Return: 0 on success otherwise negative error code
 */
int protocol_events_put(struct mbox_context *context,
			const struct transport_ops *ops)
{
	const uint8_t mask = protocol_get_bmc_event_mask(context);

	return ops->put_events(context, mask);
}

/*
 * protocol_events_set() - Update the set BMC events on the active transport
 * @context:	The mbox context pointer
 * @bmc_event:	The bits to set
 *
 * Return: 0 on success otherwise negative error code
 */
int protocol_events_set(struct mbox_context *context, uint8_t bmc_event)
{
	const uint8_t mask = protocol_get_bmc_event_mask(context);

	/*
	 * Store the raw value, as we may up- or downgrade the protocol
	 * version and subsequently need to flush the appropriate set. The
	 * masked value is what we pass through to the transport.
	 */
	context->bmc_events |= bmc_event;

	return context->transport->set_events(context, bmc_event, mask);
}

/*
 * protocol_events_clear() - Update the cleared BMC events on the active
 *			     transport
 * @context:	The mbox context pointer
 * @bmc_event:	The bits to clear
 *
 * Return: 0 on success otherwise negative error code
 */
int protocol_events_clear(struct mbox_context *context, uint8_t bmc_event)
{
	const uint8_t mask = protocol_get_bmc_event_mask(context);

	context->bmc_events &= ~bmc_event;

	return context->transport->clear_events(context, bmc_event, mask);
}
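
/*
 * Illustrative example (editor's note, not from the original source): suppose
 * an event bit is present in BMC_EVENT_V2_MASK but not in BMC_EVENT_V1_MASK.
 * While the host has negotiated v1, protocol_events_set() still records the
 * bit in context->bmc_events; only the transport write is masked. If the host
 * later renegotiates to v2, protocol_events_put() can then flush the full
 * stored state filtered through the new, wider mask.
 */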

static int protocol_negotiate_version(struct mbox_context *context,
				      uint8_t requested);

static int protocol_v1_reset(struct mbox_context *context)
{
	return __protocol_reset(context);
}

static int protocol_v1_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary */
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v1 */
	context->backend.block_size_shift = BLOCK_SIZE_SHIFT_V1;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift,
		 context->backend.block_size_shift);

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	io->resp.v1.read_window_size =
		context->windows.default_size >> context->backend.block_size_shift;
	io->resp.v1.write_window_size =
		context->windows.default_size >> context->backend.block_size_shift;

	return lpc_map_memory(context);
}

static int protocol_v1_get_flash_info(struct mbox_context *context,
				      struct protocol_get_flash_info *io)
{
	io->resp.v1.flash_size = context->backend.flash_size;
	io->resp.v1.erase_size = 1 << context->backend.erase_size_shift;

	return 0;
}

/*
 * get_lpc_addr_shifted() - Get the LPC address of the current window
 * @context:	The mbox context pointer
 *
 * Return: The LPC address of the current window, shifted by block size
 */
static inline uint16_t get_lpc_addr_shifted(struct mbox_context *context)
{
	uint32_t lpc_addr, mem_offset;

	/* Offset of the current window in the reserved memory region */
	mem_offset = context->current->mem - context->mem;
	/* Total LPC Address */
	lpc_addr = context->lpc_base + mem_offset;

	MSG_DBG("LPC address of current window: 0x%.8x\n", lpc_addr);

	return lpc_addr >> context->backend.block_size_shift;
}
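
/*
 * Worked example (illustrative values): with lpc_base 0x0f000000, a current
 * window at mem_offset 0x100000 and a 4K block size (shift 12), the host is
 * handed (0x0f000000 + 0x100000) >> 12 = 0xf100. The host multiplies this
 * back out by the block size to address the window over LPC.
 */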

static inline int64_t blktrace_gettime(void)
{
	struct timespec ts;
	int64_t n;

	clock_gettime(CLOCK_REALTIME, &ts);
	n = (int64_t)(ts.tv_sec) * (int64_t)1000000000 + (int64_t)(ts.tv_nsec);

	return n;
}

static void blktrace_flush_start(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	if (!context->blktrace_start)
		context->blktrace_start = blktrace_gettime();

	trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->sector = context->current->flash_offset / 512;
	trace->bytes = context->current->size;
	if (context->current_is_write)
		trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_WRITE);
	else
		trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
	trace->pid = 0;
	trace->device = 0;
	trace->cpu = 0;
	trace->error = 0;
	trace->pdu_len = 0;
	write(context->blktracefd, trace, sizeof(*trace));
	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_QUEUE;
	trace->action |= BLK_TA_ISSUE;
	write(context->blktracefd, trace, sizeof(*trace));
}

static void blktrace_flush_done(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_ISSUE;
	trace->action |= BLK_TA_COMPLETE;
	write(context->blktracefd, trace, sizeof(*trace));
}

static void blktrace_window_start(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	if (!context->blktrace_start)
		context->blktrace_start = blktrace_gettime();

	trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
	trace->pid = 0;
	trace->device = 0;
	trace->cpu = 0;
	trace->error = 0;
	trace->pdu_len = 0;
}

static void blktrace_window_done(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	trace->sector = context->current->flash_offset / 512;
	trace->bytes = context->current->size;
	write(context->blktracefd, trace, sizeof(*trace));
	trace->sequence++;
	trace->action &= ~BLK_TA_QUEUE;
	trace->action |= BLK_TA_ISSUE;
	write(context->blktracefd, trace, sizeof(*trace));

	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_ISSUE;
	trace->action |= BLK_TA_COMPLETE;
	write(context->blktracefd, trace, sizeof(*trace));
}
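
/*
 * Note (editor's gloss): each traced flush or window operation above is
 * emitted as a queue (BLK_TA_QUEUE), issue (BLK_TA_ISSUE) and complete
 * (BLK_TA_COMPLETE) record, mirroring the Q -> D -> C lifecycle the kernel's
 * blktrace produces, so the resulting file should be consumable by standard
 * tooling such as blkparse.
 */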

static int protocol_v1_create_window(struct mbox_context *context,
				     struct protocol_create_window *io)
{
	struct backend *backend = &context->backend;
	uint32_t offset;
	uint32_t size;
	int rc;

	offset = io->req.offset << backend->block_size_shift;
	size = io->req.size << backend->block_size_shift;
	rc = backend_validate(backend, offset, size, io->req.ro);
	if (rc < 0) {
		/* Backend does not allow window to be created */
		return rc;
	}

	/* Close the current window if there is one */
	if (context->current) {
		/* There is an implicit flush if it was a write window
		 *
		 * protocol_v2_create_window() calls
		 * protocol_v1_create_window(), so use the indirect call to
		 * flush() to make sure we pick the right version.
		 */
		if (context->current_is_write) {
			blktrace_flush_start(context);
			rc = context->protocol->flush(context, NULL);
			blktrace_flush_done(context);
			if (rc < 0) {
				MSG_ERR("Couldn't flush write window\n");
				return rc;
			}
		}
		windows_close_current(context, FLAGS_NONE);
	}

	/* Offset the host has requested */
	MSG_INFO("Host requested flash @ 0x%.8x\n", offset);
	/* Check if we have an existing window */
	blktrace_window_start(context);
	context->current = windows_search(context, offset,
					  context->version == API_VERSION_1);

	if (!context->current) { /* No existing window */
		MSG_DBG("No existing window which maps that flash offset\n");
		rc = windows_create_map(context, &context->current, offset,
					context->version == API_VERSION_1);
		if (rc < 0) { /* Unable to map offset */
			MSG_ERR("Couldn't create window mapping for offset 0x%.8x\n",
				offset);
			return rc;
		}
	}
	blktrace_window_done(context);

	context->current_is_write = !io->req.ro;

	MSG_INFO("Window @ %p for size 0x%.8x maps flash offset 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	io->resp.lpc_address = get_lpc_addr_shifted(context);

	return 0;
}

static int protocol_v1_mark_dirty(struct mbox_context *context,
				  struct protocol_mark_dirty *io)
{
	uint32_t offset = io->req.v1.offset;
	uint32_t size = io->req.v1.size;
	uint32_t off;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call mark dirty without open write window\n");
		return -EPERM;
	}

	/*
	 * For V1 the offset is given relative to flash - we want it relative
	 * to the window
	 */
	off = offset - ((context->current->flash_offset) >>
			context->backend.block_size_shift);
	if (off > offset) { /* Underflow - before current window */
		MSG_ERR("Tried to mark dirty before start of window\n");
		MSG_ERR("requested offset: 0x%x window start: 0x%x\n",
			offset << context->backend.block_size_shift,
			context->current->flash_offset);
		return -EINVAL;
	}
	offset = off;
	/*
	 * We only track dirty state at the block level. For protocol V1 we
	 * can get away with just marking the whole block dirty.
	 */
	size = align_up(size, 1 << context->backend.block_size_shift);
	size >>= context->backend.block_size_shift;

	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
		 offset << context->backend.block_size_shift,
		 size << context->backend.block_size_shift);

	return window_set_bytemap(context, context->current, offset, size,
				  WINDOW_DIRTY);
}
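
/*
 * Worked example (illustrative values): with 4K blocks (shift 12) and the
 * current window mapping flash offset 0x100000, the window starts at flash
 * block 0x100. A v1 mark-dirty request for flash block 0x102 therefore
 * becomes window block 0x102 - 0x100 = 2, while a request for block 0xff
 * underflows the subtraction and is rejected with -EINVAL.
 */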

static int generic_flush(struct mbox_context *context)
{
	int rc, i, offset, count;
	uint8_t prev;

	offset = 0;
	count = 0;
	prev = WINDOW_CLEAN;

	MSG_INFO("Flush window @ %p for size 0x%.8x which maps flash @ 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	/*
	 * We look for streaks of the same type and keep a count. When the
	 * type (dirty/erased) changes we perform the required action on the
	 * backing store and update the current streak-type.
	 */
	for (i = 0; i < (context->current->size >> context->backend.block_size_shift);
	     i++) {
		uint8_t cur = context->current->dirty_bmap[i];
		if (cur != WINDOW_CLEAN) {
			if (cur == prev) { /* Same as previous block, increment */
				count++;
			} else if (prev == WINDOW_CLEAN) { /* Start of run */
				offset = i;
				count++;
			} else { /* Change in streak type */
				rc = window_flush(context, offset, count,
						  prev);
				if (rc < 0) {
					return rc;
				}
				offset = i;
				count = 1;
			}
		} else {
			if (prev != WINDOW_CLEAN) { /* End of a streak */
				rc = window_flush(context, offset, count,
						  prev);
				if (rc < 0) {
					return rc;
				}
				offset = 0;
				count = 0;
			}
		}
		prev = cur;
	}

	if (prev != WINDOW_CLEAN) { /* Still the last streak to write */
		rc = window_flush(context, offset, count, prev);
		if (rc < 0) {
			return rc;
		}
	}

	/* Clear the dirty bytemap since we have written back all changes */
	return window_set_bytemap(context, context->current, 0,
				  context->current->size >>
				  context->backend.block_size_shift,
				  WINDOW_CLEAN);
}
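
/*
 * Worked example (illustrative): for a six-block window whose bytemap reads
 * [DIRTY, DIRTY, ERASED, ERASED, CLEAN, DIRTY], generic_flush() issues
 * window_flush(context, 0, 2, WINDOW_DIRTY), then
 * window_flush(context, 2, 2, WINDOW_ERASED) when the streak type changes,
 * and finally window_flush(context, 5, 1, WINDOW_DIRTY) for the trailing
 * streak after the loop ends.
 */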

static int protocol_v1_flush(struct mbox_context *context,
			     struct protocol_flush *io)
{
	int rc;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call flush without open write window\n");
		return -EPERM;
	}

	/*
	 * For V1 the Flush command acts much like the Mark Dirty command
	 * followed by an actual flush. Only mark dirty when handling a real
	 * flush command, not when flush is called as part of implicitly
	 * closing a window, since in that case we might not have the
	 * required arguments in req.
	 */
	if (io) {
		struct protocol_mark_dirty *mdio = (void *)io;
		rc = protocol_v1_mark_dirty(context, mdio);
		if (rc < 0) {
			return rc;
		}
	}

	return generic_flush(context);
}

static int protocol_v1_close(struct mbox_context *context,
			     struct protocol_close *io)
{
	int rc;

	/* Close the current window if there is one */
	if (!context->current) {
		return 0;
	}

	/* There is an implicit flush if it was a write window */
	if (context->current_is_write) {
		rc = protocol_v1_flush(context, NULL);
		if (rc < 0) {
			MSG_ERR("Couldn't flush write window\n");
			return rc;
		}
	}

	/* Host asked for it -> Don't set the BMC Event */
	windows_close_current(context, io->req.flags);

	return 0;
}

static int protocol_v1_ack(struct mbox_context *context,
			   struct protocol_ack *io)
{
	return protocol_events_clear(context,
				     (io->req.flags & BMC_EVENT_ACK_MASK));
}

/*
 * get_suggested_timeout() - get the suggested timeout value in seconds
 * @context:	The mbox context pointer
 *
 * Return: Suggested timeout in seconds
 */
static uint16_t get_suggested_timeout(struct mbox_context *context)
{
	struct window_context *window = windows_find_largest(context);
	uint32_t max_size_mb = window ? (window->size >> 20) : 0;
	uint16_t ret;

	ret = align_up(max_size_mb * FLASH_ACCESS_MS_PER_MB, 1000) / 1000;

	MSG_DBG("Suggested Timeout: %us, max window size: %uMB, for %dms/MB\n",
		ret, max_size_mb, FLASH_ACCESS_MS_PER_MB);
	return ret;
}
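
/*
 * Worked example (illustrative; the real FLASH_ACCESS_MS_PER_MB value is
 * defined elsewhere): if the largest window is 16MB and flash access were
 * costed at 125ms/MB, the raw figure is 2000ms, and align_up(2000, 1000) /
 * 1000 yields a suggested timeout of 2 seconds. A 1MB window at the same
 * rate gives align_up(125, 1000) / 1000 = 1 second.
 */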

static int protocol_v2_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary */
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v2 */

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	io->resp.v2.block_size_shift = context->backend.block_size_shift;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift,
		 context->backend.block_size_shift);

	io->resp.v2.timeout = get_suggested_timeout(context);

	return lpc_map_memory(context);
}

static int protocol_v2_get_flash_info(struct mbox_context *context,
				      struct protocol_get_flash_info *io)
{
	struct backend *backend = &context->backend;

	io->resp.v2.flash_size =
		backend->flash_size >> backend->block_size_shift;
	io->resp.v2.erase_size =
		((1 << backend->erase_size_shift) >> backend->block_size_shift);

	return 0;
}
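
/*
 * Worked example (illustrative values): a 32MB flash with 64KB erase blocks
 * and a 4K protocol block size (shift 12) is reported to a v2 host as
 * flash_size = 0x2000000 >> 12 = 0x2000 blocks and
 * erase_size = (1 << 16) >> 12 = 0x10 blocks. A v1 host instead receives
 * the raw byte values from protocol_v1_get_flash_info().
 */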

static int protocol_v2_create_window(struct mbox_context *context,
				     struct protocol_create_window *io)
{
	int rc;

	rc = protocol_v1_create_window(context, io);
	if (rc < 0)
		return rc;

	io->resp.size = context->current->size >> context->backend.block_size_shift;
	io->resp.offset = context->current->flash_offset >>
				context->backend.block_size_shift;

	return 0;
}

static int protocol_v2_mark_dirty(struct mbox_context *context,
				  struct protocol_mark_dirty *io)
{
	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call mark dirty without open write window\n");
		return -EPERM;
	}

	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
		 io->req.v2.offset << context->backend.block_size_shift,
		 io->req.v2.size << context->backend.block_size_shift);

	return window_set_bytemap(context, context->current, io->req.v2.offset,
				  io->req.v2.size, WINDOW_DIRTY);
}

static int protocol_v2_erase(struct mbox_context *context,
			     struct protocol_erase *io)
{
	size_t start, len;
	int rc;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call erase without open write window\n");
		return -EPERM;
	}

	MSG_INFO("Erase window @ 0x%.8x for 0x%.8x\n",
		 io->req.offset << context->backend.block_size_shift,
		 io->req.size << context->backend.block_size_shift);

	rc = window_set_bytemap(context, context->current, io->req.offset,
				io->req.size, WINDOW_ERASED);
	if (rc < 0) {
		return rc;
	}

	/* Write 0xFF to mem - this ensures consistency between flash & RAM */
	start = io->req.offset << context->backend.block_size_shift;
	len = io->req.size << context->backend.block_size_shift;
	memset(context->current->mem + start, 0xFF, len);

	return 0;
}

static int protocol_v2_flush(struct mbox_context *context,
			     struct protocol_flush *io)
{
	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call flush without open write window\n");
		return -EPERM;
	}

	return generic_flush(context);
}

static int protocol_v2_close(struct mbox_context *context,
			     struct protocol_close *io)
{
	int rc;

	/* Close the current window if there is one */
	if (!context->current) {
		return 0;
	}

	/* There is an implicit flush if it was a write window */
	if (context->current_is_write) {
		rc = protocol_v2_flush(context, NULL);
		if (rc < 0) {
			MSG_ERR("Couldn't flush write window\n");
			return rc;
		}
	}

	/* Host asked for it -> Don't set the BMC Event */
	windows_close_current(context, io->req.flags);

	return 0;
}

static const struct protocol_ops protocol_ops_v1 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v1_get_info,
	.get_flash_info = protocol_v1_get_flash_info,
	.create_window = protocol_v1_create_window,
	.mark_dirty = protocol_v1_mark_dirty,
	.erase = NULL,
	.flush = protocol_v1_flush,
	.close = protocol_v1_close,
	.ack = protocol_v1_ack,
};

static const struct protocol_ops protocol_ops_v2 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v2_get_info,
	.get_flash_info = protocol_v2_get_flash_info,
	.create_window = protocol_v2_create_window,
	.mark_dirty = protocol_v2_mark_dirty,
	.erase = protocol_v2_erase,
	.flush = protocol_v2_flush,
	.close = protocol_v2_close,
	.ack = protocol_v1_ack,
};

static const struct protocol_ops *protocol_ops_map[] = {
	[0] = NULL,
	[1] = &protocol_ops_v1,
	[2] = &protocol_ops_v2,
};

static int protocol_negotiate_version(struct mbox_context *context,
				      uint8_t requested)
{
	/* Check we support the version requested */
	if (requested < API_MIN_VERSION)
		return -EINVAL;

	context->version = (requested > API_MAX_VERSION) ?
				API_MAX_VERSION : requested;

	context->protocol = protocol_ops_map[context->version];

	return context->version;
}

int protocol_init(struct mbox_context *context)
{
	protocol_negotiate_version(context, API_MAX_VERSION);

	return 0;
}

void protocol_free(struct mbox_context *context)
{
	return;
}

/* Don't do any state manipulation, just perform the reset */
int __protocol_reset(struct mbox_context *context)
{
	enum backend_reset_mode mode;
	int rc;

	windows_reset_all(context);

	rc = backend_reset(&context->backend, context->mem, context->mem_size);
	if (rc < 0)
		return rc;

	mode = rc;
	if (!(mode == reset_lpc_flash || mode == reset_lpc_memory))
		return -EINVAL;

	if (mode == reset_lpc_flash)
		return lpc_map_flash(context);

	assert(mode == reset_lpc_memory);
	return lpc_map_memory(context);
}

/* Prevent the host from performing actions whilst reset takes place */
int protocol_reset(struct mbox_context *context)
{
	int rc;

	rc = protocol_events_clear(context, BMC_EVENT_DAEMON_READY);
	if (rc < 0) {
		MSG_ERR("Failed to clear daemon ready state, reset failed\n");
		return rc;
	}

	rc = __protocol_reset(context);
	if (rc < 0) {
		MSG_ERR("Failed to reset protocol, daemon remains not ready\n");
		return rc;
	}

	rc = protocol_events_set(context,
				 BMC_EVENT_DAEMON_READY | BMC_EVENT_PROTOCOL_RESET);
	if (rc < 0) {
		MSG_ERR("Failed to set daemon ready state, daemon remains not ready\n");
		return rc;
	}

	return 0;
}