1 // SPDX-License-Identifier: Apache-2.0
2 // Copyright (C) 2018 IBM Corp.
3 #include "config.h"
4
5 #include <errno.h>
6 #include <stdint.h>
7 #include <stdio.h>
8 #include <unistd.h>
9
10 #include "backend.h"
11 #include "common.h"
12 #include "lpc.h"
13 #include "mboxd.h"
14 #include "protocol.h"
15 #include "windows.h"
16
17 #pragma GCC diagnostic push
18 #pragma GCC diagnostic ignored "-Wpointer-arith"
19 #pragma GCC diagnostic ignored "-Wunused-result"
20
21 #define BLOCK_SIZE_SHIFT_V1 12 /* 4K */
22
protocol_get_bmc_event_mask(struct mbox_context * context)23 static inline uint8_t protocol_get_bmc_event_mask(struct mbox_context *context)
24 {
25 if (context->version == API_VERSION_1) {
26 return BMC_EVENT_V1_MASK;
27 }
28
29 return BMC_EVENT_V2_MASK;
30 }
31
32 /*
33 * protocol_events_put() - Push the full set/cleared state of BMC events on the
34 * provided transport
35 * @context: The mbox context pointer
36 * @ops: The operations struct for the transport of interest
37 *
38 * Return: 0 on success otherwise negative error code
39 */
protocol_events_put(struct mbox_context * context,const struct transport_ops * ops)40 int protocol_events_put(struct mbox_context *context,
41 const struct transport_ops *ops)
42 {
43 const uint8_t mask = protocol_get_bmc_event_mask(context);
44
45 return ops->put_events(context, mask);
46 }
47
48 /*
49 * protocol_events_set() - Update the set BMC events on the active transport
50 * @context: The mbox context pointer
51 * @bmc_event: The bits to set
52 *
53 * Return: 0 on success otherwise negative error code
54 */
protocol_events_set(struct mbox_context * context,uint8_t bmc_event)55 int protocol_events_set(struct mbox_context *context, uint8_t bmc_event)
56 {
57 const uint8_t mask = protocol_get_bmc_event_mask(context);
58
59 /*
60 * Store the raw value, as we may up- or down- grade the protocol
61 * version and subsequently need to flush the appropriate set. Instead
62 * we pass the masked value through to the transport
63 */
64 context->bmc_events |= bmc_event;
65
66 return context->transport->set_events(context, bmc_event, mask);
67 }
68
69 /*
70 * protocol_events_clear() - Update the cleared BMC events on the active
71 * transport
72 * @context: The mbox context pointer
73 * @bmc_event: The bits to clear
74 *
75 * Return: 0 on success otherwise negative error code
76 */
protocol_events_clear(struct mbox_context * context,uint8_t bmc_event)77 int protocol_events_clear(struct mbox_context *context, uint8_t bmc_event)
78 {
79 const uint8_t mask = protocol_get_bmc_event_mask(context);
80
81 context->bmc_events &= ~bmc_event;
82
83 return context->transport->clear_events(context, bmc_event, mask);
84 }
85
86 static int protocol_negotiate_version(struct mbox_context *context,
87 uint8_t requested);
88
/* v1 reset: no version-specific state to tear down, defer to the common path */
static int protocol_v1_reset(struct mbox_context *context)
{
	return __protocol_reset(context);
}
93
/*
 * NOTE(review): a forward declaration of protocol_negotiate_version()
 * already appears above protocol_v1_reset(); the duplicate declaration
 * that was here has been removed.
 */
96
/*
 * protocol_v1_get_info() - Handle the v1 GET_MBOX_INFO command
 * @context: The mbox context pointer
 * @io: The GET_MBOX_INFO request/response parameters
 *
 * Negotiates the protocol version with the host; if negotiation changes the
 * active version, the call is re-dispatched through the newly installed ops
 * table. Otherwise performs the v1-specific initialisation (fixed 4K block
 * size, window geometry in the response) and maps the reserved memory window
 * over LPC.
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v1_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary */
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		/* context->protocol now points at the negotiated version's ops */
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v1 */
	context->backend.block_size_shift = BLOCK_SIZE_SHIFT_V1;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift, context->backend.block_size_shift);

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	/* v1 reports window sizes in block units rather than a shift */
	io->resp.v1.read_window_size =
		context->windows.default_size >> context->backend.block_size_shift;
	io->resp.v1.write_window_size =
		context->windows.default_size >> context->backend.block_size_shift;

	return lpc_map_memory(context);
}
133
protocol_v1_get_flash_info(struct mbox_context * context,struct protocol_get_flash_info * io)134 static int protocol_v1_get_flash_info(struct mbox_context *context,
135 struct protocol_get_flash_info *io)
136 {
137 io->resp.v1.flash_size = context->backend.flash_size;
138 io->resp.v1.erase_size = 1 << context->backend.erase_size_shift;
139
140 return 0;
141 }
142
143 /*
144 * get_lpc_addr_shifted() - Get lpc address of the current window
145 * @context: The mbox context pointer
146 *
147 * Return: The lpc address to access that offset shifted by block size
148 */
get_lpc_addr_shifted(struct mbox_context * context)149 static inline uint16_t get_lpc_addr_shifted(struct mbox_context *context)
150 {
151 uint32_t lpc_addr, mem_offset;
152
153 /* Offset of the current window in the reserved memory region */
154 mem_offset = context->current->mem - context->mem;
155 /* Total LPC Address */
156 lpc_addr = context->lpc_base + mem_offset;
157
158 MSG_DBG("LPC address of current window: 0x%.8x\n", lpc_addr);
159
160 return lpc_addr >> context->backend.block_size_shift;
161 }
162
/* Current CLOCK_REALTIME time in nanoseconds, as blktrace timestamps expect */
static inline int64_t blktrace_gettime(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);

	return (int64_t)ts.tv_sec * 1000000000LL + (int64_t)ts.tv_nsec;
}
173
blktrace_flush_start(struct mbox_context * context)174 static void blktrace_flush_start(struct mbox_context *context)
175 {
176 struct blk_io_trace *trace = &context->trace;
177 struct timespec now;
178
179 if (!context->blktracefd)
180 return;
181
182 if (!context->blktrace_start) {
183 clock_gettime(CLOCK_REALTIME, &now);
184 context->blktrace_start = blktrace_gettime();
185 }
186
187 trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
188 trace->sequence++;
189 trace->time = blktrace_gettime() - context->blktrace_start;
190 trace->sector = context->current->flash_offset / 512;
191 trace->bytes = context->current->size;
192 if (context->current_is_write)
193 trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_WRITE);
194 else
195 trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
196 trace->pid = 0;
197 trace->device = 0;
198 trace->cpu = 0;
199 trace->error = 0;
200 trace->pdu_len = 0;
201 write(context->blktracefd, trace, sizeof(*trace));
202 trace->sequence++;
203 trace->time = blktrace_gettime() - context->blktrace_start;
204 trace->action &= ~BLK_TA_QUEUE;
205 trace->action |= BLK_TA_ISSUE;
206 write(context->blktracefd, trace, sizeof(*trace));
207 }
208
blktrace_flush_done(struct mbox_context * context)209 static void blktrace_flush_done(struct mbox_context *context)
210 {
211 struct blk_io_trace *trace = &context->trace;
212
213 if (!context->blktracefd)
214 return;
215
216 trace->sequence++;
217 trace->time = blktrace_gettime() - context->blktrace_start;
218 trace->action &= ~BLK_TA_ISSUE;
219 trace->action |= BLK_TA_COMPLETE;
220 write(context->blktracefd, trace, sizeof(*trace));
221 }
222
/*
 * blktrace_window_start() - Prime a blktrace QUEUE record for a window read
 * @context: The mbox context pointer
 *
 * Fills in everything except sector/bytes; those are supplied and the
 * record actually written out by blktrace_window_done(). No-op unless a
 * blktrace output fd has been configured.
 */
static void blktrace_window_start(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	/* Timestamps are relative to the first traced event */
	if (!context->blktrace_start)
		context->blktrace_start = blktrace_gettime();

	trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
	trace->pid = 0;
	trace->device = 0;
	trace->cpu = 0;
	trace->error = 0;
	trace->pdu_len = 0;
}
243
/*
 * blktrace_window_done() - Complete the trace started by
 * blktrace_window_start(): fill in the window geometry and emit the QUEUE,
 * ISSUE and COMPLETE records
 * @context: The mbox context pointer
 */
static void blktrace_window_done(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	/* blktrace sectors are fixed 512-byte units */
	trace->sector = context->current->flash_offset / 512;
	trace->bytes = context->current->size;
	write(context->blktracefd, trace, sizeof(*trace));
	trace->sequence++;
	trace->action &= ~BLK_TA_QUEUE;
	trace->action |= BLK_TA_ISSUE;
	write(context->blktracefd, trace, sizeof(*trace));

	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_ISSUE;
	trace->action |= BLK_TA_COMPLETE;
	write(context->blktracefd, trace, sizeof(*trace));
}
265
/*
 * protocol_v1_create_window() - Open a read or write window over flash
 * @context: The mbox context pointer
 * @io: Request (offset/size in blocks, ro flag) and response parameters
 *
 * Validates the requested region with the backend, implicitly flushes and
 * closes any currently open window, then reuses or creates a window mapping
 * the requested offset. Also used by protocol_v2_create_window().
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v1_create_window(struct mbox_context *context,
				     struct protocol_create_window *io)
{
	struct backend *backend = &context->backend;
	uint32_t offset;
	uint32_t size;
	int rc;

	/* Request fields are in block units; convert to bytes */
	offset = io->req.offset << backend->block_size_shift;
	size = io->req.size << backend->block_size_shift;
	rc = backend_validate(backend, offset, size, io->req.ro);
	if (rc < 0) {
		/* Backend does not allow window to be created. */
		return rc;
	}

	/* Close the current window if there is one */
	if (context->current) {
		/* There is an implicit flush if it was a write window
		 *
		 * protocol_v2_create_window() calls
		 * protocol_v1_create_window(), so use indirect call to
		 * write_flush() to make sure we pick the right one.
		 */
		if (context->current_is_write) {
			blktrace_flush_start(context);
			rc = context->protocol->flush(context, NULL);
			blktrace_flush_done(context);
			if (rc < 0) {
				MSG_ERR("Couldn't Flush Write Window\n");
				return rc;
			}
		}
		windows_close_current(context, FLAGS_NONE);
	}

	/* Offset the host has requested */
	MSG_INFO("Host requested flash @ 0x%.8x\n", offset);
	/* Check if we have an existing window */
	blktrace_window_start(context);
	/* The final argument selects v1-specific matching in windows_search() */
	context->current = windows_search(context, offset,
					  context->version == API_VERSION_1);

	if (!context->current) { /* No existing window */
		MSG_DBG("No existing window which maps that flash offset\n");
		rc = windows_create_map(context, &context->current,
					offset,
					context->version == API_VERSION_1);
		if (rc < 0) { /* Unable to map offset */
			MSG_ERR("Couldn't create window mapping for offset 0x%.8x\n",
				offset);
			return rc;
		}
	}
	blktrace_window_done(context);

	context->current_is_write = !io->req.ro;

	MSG_INFO("Window @ %p for size 0x%.8x maps flash offset 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	io->resp.lpc_address = get_lpc_addr_shifted(context);

	return 0;
}
332
/*
 * protocol_v1_mark_dirty() - Handle the v1 MARK_WRITE_DIRTY command
 * @context: The mbox context pointer
 * @io: The request parameters (v1: flash-relative offset/size)
 *
 * v1 gives the dirty region relative to the start of flash, so it is
 * translated to a window-relative block range before updating the bytemap.
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v1_mark_dirty(struct mbox_context *context,
				  struct protocol_mark_dirty *io)
{
	uint32_t offset = io->req.v1.offset;
	uint32_t size = io->req.v1.size;
	uint32_t off;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call mark dirty without open write window\n");
		return -EPERM;
	}

	/* For V1 offset given relative to flash - we want the window */
	off = offset - ((context->current->flash_offset) >>
			context->backend.block_size_shift);
	/*
	 * Unsigned subtraction wraps, so a result larger than the input
	 * means the requested offset lies before the window.
	 */
	if (off > offset) { /* Underflow - before current window */
		MSG_ERR("Tried to mark dirty before start of window\n");
		MSG_ERR("requested offset: 0x%x window start: 0x%x\n",
			offset << context->backend.block_size_shift,
			context->current->flash_offset);
		return -EINVAL;
	}
	offset = off;
	/*
	 * We only track dirty at the block level.
	 * For protocol V1 we can get away with just marking the whole
	 * block dirty.
	 */
	size = align_up(size, 1 << context->backend.block_size_shift);
	size >>= context->backend.block_size_shift;

	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
		 offset << context->backend.block_size_shift,
		 size << context->backend.block_size_shift);

	return window_set_bytemap(context, context->current, offset, size,
				  WINDOW_DIRTY);
}
371
/*
 * generic_flush() - Write all dirty/erased runs of the current window back
 * to the backing store
 * @context: The mbox context pointer
 *
 * Walks the window's per-block dirty bytemap, coalescing adjacent blocks in
 * the same state (dirty or erased) into single window_flush() calls, then
 * marks the whole window clean.
 *
 * Return: 0 on success otherwise negative error code
 */
static int generic_flush(struct mbox_context *context)
{
	int rc, offset, count;	/* offset/count track the current streak, in blocks */
	uint8_t prev;
	size_t i;

	offset = 0;
	count = 0;
	prev = WINDOW_CLEAN;

	MSG_INFO("Flush window @ %p for size 0x%.8x which maps flash @ 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	/*
	 * We look for streaks of the same type and keep a count, when the type
	 * (dirty/erased) changes we perform the required action on the backing
	 * store and update the current streak-type
	 */
	for (i = 0; i < (context->current->size >> context->backend.block_size_shift);
			i++) {
		uint8_t cur = context->current->dirty_bmap[i];
		if (cur != WINDOW_CLEAN) {
			if (cur == prev) { /* Same as previous block, increment */
				count++;
			} else if (prev == WINDOW_CLEAN) { /* Start of run */
				offset = i;
				count++;
			} else { /* Change in streak type */
				rc = window_flush(context, offset, count,
						       prev);
				if (rc < 0) {
					return rc;
				}
				offset = i;
				count = 1;
			}
		} else {
			if (prev != WINDOW_CLEAN) { /* End of a streak */
				rc = window_flush(context, offset, count,
						       prev);
				if (rc < 0) {
					return rc;
				}
				offset = 0;
				count = 0;
			}
		}
		prev = cur;
	}

	if (prev != WINDOW_CLEAN) { /* Still the last streak to write */
		rc = window_flush(context, offset, count, prev);
		if (rc < 0) {
			return rc;
		}
	}

	/* Clear the dirty bytemap since we have written back all changes */
	return window_set_bytemap(context, context->current, 0,
				  context->current->size >>
				  context->backend.block_size_shift,
				  WINDOW_CLEAN);
}
436
protocol_v1_flush(struct mbox_context * context,struct protocol_flush * io)437 static int protocol_v1_flush(struct mbox_context *context,
438 struct protocol_flush *io)
439 {
440 int rc;
441
442 if (!(context->current && context->current_is_write)) {
443 MSG_ERR("Tried to call flush without open write window\n");
444 return -EPERM;
445 }
446
447 /*
448 * For V1 the Flush command acts much the same as the dirty command
449 * except with a flush as well. Only do this on an actual flush
450 * command not when we call flush because we've implicitly closed a
451 * window because we might not have the required args in req.
452 */
453 if (io) {
454 struct protocol_mark_dirty *mdio = (void *)io;
455 rc = protocol_v1_mark_dirty(context, mdio);
456 if (rc < 0) {
457 return rc;
458 }
459 }
460
461 return generic_flush(context);
462 }
463
protocol_v1_close(struct mbox_context * context,struct protocol_close * io)464 static int protocol_v1_close(struct mbox_context *context,
465 struct protocol_close *io)
466 {
467 int rc;
468
469 /* Close the current window if there is one */
470 if (!context->current) {
471 return 0;
472 }
473
474 /* There is an implicit flush if it was a write window */
475 if (context->current_is_write) {
476 rc = protocol_v1_flush(context, NULL);
477 if (rc < 0) {
478 MSG_ERR("Couldn't Flush Write Window\n");
479 return rc;
480 }
481 }
482
483 /* Host asked for it -> Don't set the BMC Event */
484 windows_close_current(context, io->req.flags);
485
486 return 0;
487 }
488
protocol_v1_ack(struct mbox_context * context,struct protocol_ack * io)489 static int protocol_v1_ack(struct mbox_context *context,
490 struct protocol_ack *io)
491 {
492 return protocol_events_clear(context,
493 (io->req.flags & BMC_EVENT_ACK_MASK));
494 }
495
496 /*
497 * get_suggested_timeout() - get the suggested timeout value in seconds
498 * @context: The mbox context pointer
499 *
500 * Return: Suggested timeout in seconds
501 */
get_suggested_timeout(struct mbox_context * context)502 static uint16_t get_suggested_timeout(struct mbox_context *context)
503 {
504 struct window_context *window = windows_find_largest(context);
505 uint32_t max_size_mb = window ? (window->size >> 20) : 0;
506 uint16_t ret;
507
508 ret = align_up(max_size_mb * FLASH_ACCESS_MS_PER_MB, 1000) / 1000;
509
510 MSG_DBG("Suggested Timeout: %us, max window size: %uMB, for %dms/MB\n",
511 ret, max_size_mb, FLASH_ACCESS_MS_PER_MB);
512 return ret;
513 }
514
/*
 * protocol_v2_get_info() - Handle the v2 GET_MBOX_INFO command
 * @context: The mbox context pointer
 * @io: The GET_MBOX_INFO request/response parameters
 *
 * Negotiates the protocol version with the host; if negotiation changes the
 * active version, the call is re-dispatched through the newly installed ops
 * table. Otherwise reports the backend's block size shift and a suggested
 * timeout, and maps the reserved memory window over LPC.
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v2_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary */
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		/* context->protocol now points at the negotiated version's ops */
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v2 */

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	/* v2 reports the shift itself rather than window sizes in blocks */
	io->resp.v2.block_size_shift = context->backend.block_size_shift;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift, context->backend.block_size_shift);

	io->resp.v2.timeout = get_suggested_timeout(context);

	return lpc_map_memory(context);
}
549
protocol_v2_get_flash_info(struct mbox_context * context,struct protocol_get_flash_info * io)550 static int protocol_v2_get_flash_info(struct mbox_context *context,
551 struct protocol_get_flash_info *io)
552 {
553 struct backend *backend = &context->backend;
554
555 io->resp.v2.flash_size =
556 backend->flash_size >> backend->block_size_shift;
557 io->resp.v2.erase_size =
558 ((1 << backend->erase_size_shift) >> backend->block_size_shift);
559
560 return 0;
561 }
562
protocol_v2_create_window(struct mbox_context * context,struct protocol_create_window * io)563 static int protocol_v2_create_window(struct mbox_context *context,
564 struct protocol_create_window *io)
565 {
566 int rc;
567
568 rc = protocol_v1_create_window(context, io);
569 if (rc < 0)
570 return rc;
571
572 io->resp.size = context->current->size >> context->backend.block_size_shift;
573 io->resp.offset = context->current->flash_offset >>
574 context->backend.block_size_shift;
575
576 return 0;
577 }
578
protocol_v2_mark_dirty(struct mbox_context * context,struct protocol_mark_dirty * io)579 static int protocol_v2_mark_dirty(struct mbox_context *context,
580 struct protocol_mark_dirty *io)
581 {
582 if (!(context->current && context->current_is_write)) {
583 MSG_ERR("Tried to call mark dirty without open write window\n");
584 return -EPERM;
585 }
586
587 MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
588 io->req.v2.offset << context->backend.block_size_shift,
589 io->req.v2.size << context->backend.block_size_shift);
590
591 return window_set_bytemap(context, context->current, io->req.v2.offset,
592 io->req.v2.size, WINDOW_DIRTY);
593 }
594
protocol_v2_erase(struct mbox_context * context,struct protocol_erase * io)595 static int protocol_v2_erase(struct mbox_context *context,
596 struct protocol_erase *io)
597 {
598 size_t start, len;
599 int rc;
600
601 if (!(context->current && context->current_is_write)) {
602 MSG_ERR("Tried to call erase without open write window\n");
603 return -EPERM;
604 }
605
606 MSG_INFO("Erase window @ 0x%.8x for 0x%.8x\n",
607 io->req.offset << context->backend.block_size_shift,
608 io->req.size << context->backend.block_size_shift);
609
610 rc = window_set_bytemap(context, context->current, io->req.offset,
611 io->req.size, WINDOW_ERASED);
612 if (rc < 0) {
613 return rc;
614 }
615
616 /* Write 0xFF to mem -> This ensures consistency between flash & ram */
617 start = io->req.offset << context->backend.block_size_shift;
618 len = io->req.size << context->backend.block_size_shift;
619 memset(context->current->mem + start, 0xFF, len);
620
621 return 0;
622 }
623
protocol_v2_flush(struct mbox_context * context,struct protocol_flush * io)624 static int protocol_v2_flush(struct mbox_context *context __attribute__((unused)),
625 struct protocol_flush *io __attribute__((unused)))
626 {
627 if (!(context->current && context->current_is_write)) {
628 MSG_ERR("Tried to call flush without open write window\n");
629 return -EPERM;
630 }
631
632 return generic_flush(context);
633 }
634
protocol_v2_close(struct mbox_context * context,struct protocol_close * io)635 static int protocol_v2_close(struct mbox_context *context,
636 struct protocol_close *io)
637 {
638 int rc;
639
640 /* Close the current window if there is one */
641 if (!context->current) {
642 return 0;
643 }
644
645 /* There is an implicit flush if it was a write window */
646 if (context->current_is_write) {
647 rc = protocol_v2_flush(context, NULL);
648 if (rc < 0) {
649 MSG_ERR("Couldn't Flush Write Window\n");
650 return rc;
651 }
652 }
653
654 /* Host asked for it -> Don't set the BMC Event */
655 windows_close_current(context, io->req.flags);
656
657 return 0;
658 }
659
/* v1 dispatch table; protocol v1 has no erase command */
static const struct protocol_ops protocol_ops_v1 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v1_get_info,
	.get_flash_info = protocol_v1_get_flash_info,
	.create_window = protocol_v1_create_window,
	.mark_dirty = protocol_v1_mark_dirty,
	.erase = NULL,
	.flush = protocol_v1_flush,
	.close = protocol_v1_close,
	.ack = protocol_v1_ack,
};
671
/* v2 dispatch table; reset and ack are shared with v1 */
static const struct protocol_ops protocol_ops_v2 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v2_get_info,
	.get_flash_info = protocol_v2_get_flash_info,
	.create_window = protocol_v2_create_window,
	.mark_dirty = protocol_v2_mark_dirty,
	.erase = protocol_v2_erase,
	.flush = protocol_v2_flush,
	.close = protocol_v2_close,
	.ack = protocol_v1_ack,
};
683
/* Indexed by negotiated protocol version; index 0 is invalid */
static const struct protocol_ops *protocol_ops_map[] = {
	[0] = NULL,
	[1] = &protocol_ops_v1,
	[2] = &protocol_ops_v2,
};
689
protocol_negotiate_version(struct mbox_context * context,uint8_t requested)690 static int protocol_negotiate_version(struct mbox_context *context,
691 uint8_t requested)
692 {
693 /* Check we support the version requested */
694 if (requested < API_MIN_VERSION)
695 return -EINVAL;
696
697 context->version = (requested > API_MAX_VERSION) ?
698 API_MAX_VERSION : requested;
699
700 context->protocol = protocol_ops_map[context->version];
701
702 return context->version;
703 }
704
/*
 * protocol_init() - Install the default (maximum) protocol version
 * @context: The mbox context pointer
 *
 * Return: 0 on success
 */
int protocol_init(struct mbox_context *context)
{
	/* Requesting API_MAX_VERSION cannot fall below API_MIN_VERSION */
	protocol_negotiate_version(context, API_MAX_VERSION);

	return 0;
}
711
/* No protocol state is dynamically allocated; nothing to release */
void protocol_free(struct mbox_context *context __attribute__((unused)))
{
}
716
717 /* Don't do any state manipulation, just perform the reset */
__protocol_reset(struct mbox_context * context)718 int __protocol_reset(struct mbox_context *context)
719 {
720 enum backend_reset_mode mode;
721 int rc;
722
723 windows_reset_all(context);
724
725 rc = backend_reset(&context->backend, context->mem, context->mem_size);
726 if (rc < 0)
727 return rc;
728
729 mode = rc;
730 if (!(mode == reset_lpc_flash || mode == reset_lpc_memory))
731 return -EINVAL;
732
733 if (mode == reset_lpc_flash)
734 return lpc_map_flash(context);
735
736 assert(mode == reset_lpc_memory);
737 return lpc_map_memory(context);
738 }
739
740 /* Prevent the host from performing actions whilst reset takes place */
int protocol_reset(struct mbox_context *context)
{
	int rc;

	/* Drop DAEMON_READY first so the host backs off during the reset */
	rc = protocol_events_clear(context, BMC_EVENT_DAEMON_READY);
	if (rc < 0) {
		MSG_ERR("Failed to clear daemon ready state, reset failed\n");
		return rc;
	}

	rc = __protocol_reset(context);
	if (rc < 0) {
		MSG_ERR("Failed to reset protocol, daemon remains not ready\n");
		return rc;
	}

	/* Announce both readiness and the fact that a reset occurred */
	rc = protocol_events_set(context,
				 BMC_EVENT_DAEMON_READY | BMC_EVENT_PROTOCOL_RESET);
	if (rc < 0) {
		MSG_ERR("Failed to set daemon ready state, daemon remains not ready\n");
		return rc;
	}

	return 0;
}
766
767 #pragma GCC diagnostic pop
768