xref: /openbmc/hiomapd/protocol.c (revision f1e547c7)
1 // SPDX-License-Identifier: Apache-2.0
2 // Copyright (C) 2018 IBM Corp.
#include "config.h"

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "backend.h"
#include "common.h"
#include "lpc.h"
#include "mboxd.h"
#include "protocol.h"
#include "windows.h"
15 
16 #define BLOCK_SIZE_SHIFT_V1		12 /* 4K */
17 
18 static inline uint8_t protocol_get_bmc_event_mask(struct mbox_context *context)
19 {
20 	if (context->version == API_VERSION_1) {
21 		return BMC_EVENT_V1_MASK;
22 	}
23 
24 	return BMC_EVENT_V2_MASK;
25 }
26 
27 /*
28  * protocol_events_put() - Push the full set/cleared state of BMC events on the
29  * 			   provided transport
30  * @context:    The mbox context pointer
31  * @ops:	The operations struct for the transport of interest
32  *
33  * Return:      0 on success otherwise negative error code
34  */
35 int protocol_events_put(struct mbox_context *context,
36 			const struct transport_ops *ops)
37 {
38 	const uint8_t mask = protocol_get_bmc_event_mask(context);
39 
40 	return ops->put_events(context, mask);
41 }
42 
43 /*
44  * protocol_events_set() - Update the set BMC events on the active transport
45  * @context:	The mbox context pointer
46  * @bmc_event:	The bits to set
47  *
48  * Return:	0 on success otherwise negative error code
49  */
50 int protocol_events_set(struct mbox_context *context, uint8_t bmc_event)
51 {
52 	const uint8_t mask = protocol_get_bmc_event_mask(context);
53 
54 	/*
55 	 * Store the raw value, as we may up- or down- grade the protocol
56 	 * version and subsequently need to flush the appropriate set. Instead
57 	 * we pass the masked value through to the transport
58 	 */
59 	context->bmc_events |= bmc_event;
60 
61 	return context->transport->set_events(context, bmc_event, mask);
62 }
63 
64 /*
65  * protocol_events_clear() - Update the cleared BMC events on the active
66  *                           transport
67  * @context:	The mbox context pointer
68  * @bmc_event:	The bits to clear
69  *
70  * Return:	0 on success otherwise negative error code
71  */
72 int protocol_events_clear(struct mbox_context *context, uint8_t bmc_event)
73 {
74 	const uint8_t mask = protocol_get_bmc_event_mask(context);
75 
76 	context->bmc_events &= ~bmc_event;
77 
78 	return context->transport->clear_events(context, bmc_event, mask);
79 }
80 
/* Forward declaration: used by both get_info handlers to (re)negotiate */
static int protocol_negotiate_version(struct mbox_context *context,
				      uint8_t requested);

/*
 * protocol_v1_reset() - Handle the RESET command for protocol v1 (shared
 *                       with v2 via protocol_ops_v2)
 * @context:	The mbox context pointer
 *
 * Return:	0 on success otherwise negative error code
 */
static int protocol_v1_reset(struct mbox_context *context)
{
	return __protocol_reset(context);
}
91 
/*
 * protocol_v1_get_info() - Handle the GET_INFO command under protocol v1
 * @context:	The mbox context pointer
 * @io:		GET_INFO request/response parameters
 *
 * Negotiates the protocol version against the host's request. If the
 * negotiated version differs from the current one, context->protocol has
 * been switched and the call is re-dispatched to the handler for the new
 * version. Otherwise this performs the v1-specific initialisation.
 *
 * Return:	0 on success otherwise negative error code
 */
static int protocol_v1_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary*/
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		/* context->protocol was switched by the negotiation above */
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v1 */
	/* v1 fixes the block size at 4K regardless of the backend */
	context->backend.block_size_shift = BLOCK_SIZE_SHIFT_V1;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift, context->backend.block_size_shift);

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	/* Window sizes are reported to the host in block-size units */
	io->resp.v1.read_window_size =
		context->windows.default_size >> context->backend.block_size_shift;
	io->resp.v1.write_window_size =
		context->windows.default_size >> context->backend.block_size_shift;

	return lpc_map_memory(context);
}
128 
129 static int protocol_v1_get_flash_info(struct mbox_context *context,
130 				      struct protocol_get_flash_info *io)
131 {
132 	io->resp.v1.flash_size = context->backend.flash_size;
133 	io->resp.v1.erase_size = 1 << context->backend.erase_size_shift;
134 
135 	return 0;
136 }
137 
/*
 * get_lpc_addr_shifted() - Get lpc address of the current window
 * @context:		The mbox context pointer
 *
 * Return:	The lpc address to access that offset shifted by block size
 */
static inline uint16_t get_lpc_addr_shifted(struct mbox_context *context)
{
	uint32_t lpc_addr, mem_offset;

	/* Offset of the current window in the reserved memory region */
	mem_offset = context->current->mem - context->mem;
	/* Total LPC Address */
	lpc_addr = context->lpc_base + mem_offset;

	MSG_DBG("LPC address of current window: 0x%.8x\n", lpc_addr);

	/*
	 * NOTE(review): the uint16_t return narrows the shifted 32-bit
	 * address; presumably lpc_base/shift guarantee it fits — confirm.
	 */
	return lpc_addr >> context->backend.block_size_shift;
}
157 
/*
 * protocol_v1_create_window() - Open a read or write window for the host
 * @context:	The mbox context pointer
 * @io:		CREATE_*_WINDOW request/response parameters (offset and size
 *		are in block-size units; ro selects a read window)
 *
 * Validates the request against the backend, implicitly flushes and closes
 * any currently open window, then reuses or creates a window mapping the
 * requested flash offset. Also used by protocol_v2_create_window().
 *
 * Return:	0 on success otherwise negative error code
 */
static int protocol_v1_create_window(struct mbox_context *context,
				     struct protocol_create_window *io)
{
	struct backend *backend = &context->backend;
	uint32_t offset;
	uint32_t size;
	int rc;

	/* Convert block-size units from the request into byte quantities */
	offset = io->req.offset << backend->block_size_shift;
	size = io->req.size << backend->block_size_shift;
	rc = backend_validate(backend, offset, size, io->req.ro);
	if (rc < 0) {
		/* Backend does not allow window to be created. */
		return rc;
	}

	/* Close the current window if there is one */
	if (context->current) {
		/* There is an implicit flush if it was a write window
		 *
		 * protocol_v2_create_window() calls
		 * protocol_v1_create_window(), so use indirect call to
		 * write_flush() to make sure we pick the right one.
		 */
		if (context->current_is_write) {
			rc = context->protocol->flush(context, NULL);
			if (rc < 0) {
				MSG_ERR("Couldn't Flush Write Window\n");
				return rc;
			}
		}
		windows_close_current(context, FLAGS_NONE);
	}

	/* Offset the host has requested */
	MSG_INFO("Host requested flash @ 0x%.8x\n", offset);
	/* Check if we have an existing window */
	/* The final argument requests exact-match behaviour for v1 */
	context->current = windows_search(context, offset,
					  context->version == API_VERSION_1);

	if (!context->current) { /* No existing window */
		MSG_DBG("No existing window which maps that flash offset\n");
		rc = windows_create_map(context, &context->current,
				       offset,
				       context->version == API_VERSION_1);
		if (rc < 0) { /* Unable to map offset */
			MSG_ERR("Couldn't create window mapping for offset 0x%.8x\n",
				offset);
			return rc;
		}
	}

	context->current_is_write = !io->req.ro;

	MSG_INFO("Window @ %p for size 0x%.8x maps flash offset 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	io->resp.lpc_address = get_lpc_addr_shifted(context);

	return 0;
}
220 
/*
 * protocol_v1_mark_dirty() - Mark a range of the write window as dirty
 * @context:	The mbox context pointer
 * @io:		MARK_DIRTY request parameters (v1: offset is relative to the
 *		start of flash in blocks, size is in bytes)
 *
 * Return:	0 on success otherwise negative error code
 */
static int protocol_v1_mark_dirty(struct mbox_context *context,
				  struct protocol_mark_dirty *io)
{
	uint32_t offset = io->req.v1.offset;
	uint32_t size = io->req.v1.size;
	uint32_t off;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call mark dirty without open write window\n");
		return -EPERM;
	}

	/* For V1 offset given relative to flash - we want the window */
	off = offset - ((context->current->flash_offset) >>
			context->backend.block_size_shift);
	/* Unsigned wrap-around detects an offset before the window start */
	if (off > offset) { /* Underflow - before current window */
		MSG_ERR("Tried to mark dirty before start of window\n");
		MSG_ERR("requested offset: 0x%x window start: 0x%x\n",
				offset << context->backend.block_size_shift,
				context->current->flash_offset);
		return -EINVAL;
	}
	offset = off;
	/*
	 * We only track dirty at the block level.
	 * For protocol V1 we can get away with just marking the whole
	 * block dirty.
	 */
	size = align_up(size, 1 << context->backend.block_size_shift);
	size >>= context->backend.block_size_shift;

	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
		 offset << context->backend.block_size_shift,
		 size << context->backend.block_size_shift);

	return window_set_bytemap(context, context->current, offset, size,
				  WINDOW_DIRTY);
}
259 
/*
 * generic_flush() - Write all dirty/erased blocks of the current window
 *                   back to the backing store
 * @context:	The mbox context pointer (a current window must be open)
 *
 * Scans the per-block dirty bytemap, coalescing consecutive blocks in the
 * same state (WINDOW_DIRTY or WINDOW_ERASED) into one window_flush() call
 * per run. On success the whole bytemap is reset to WINDOW_CLEAN.
 *
 * Return:	0 on success otherwise negative error code
 */
static int generic_flush(struct mbox_context *context)
{
	int rc, i, offset, count;
	uint8_t prev;

	/* offset/count describe the run currently being accumulated */
	offset = 0;
	count = 0;
	prev = WINDOW_CLEAN;

	MSG_INFO("Flush window @ %p for size 0x%.8x which maps flash @ 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	/*
	 * We look for streaks of the same type and keep a count, when the type
	 * (dirty/erased) changes we perform the required action on the backing
	 * store and update the current streak-type
	 */
	for (i = 0; i < (context->current->size >> context->backend.block_size_shift);
			i++) {
		uint8_t cur = context->current->dirty_bmap[i];
		if (cur != WINDOW_CLEAN) {
			if (cur == prev) { /* Same as previous block, incrmnt */
				count++;
			} else if (prev == WINDOW_CLEAN) { /* Start of run */
				offset = i;
				count++;
			} else { /* Change in streak type */
				rc = window_flush(context, offset, count,
						       prev);
				if (rc < 0) {
					return rc;
				}
				/* New run starts at this block */
				offset = i;
				count = 1;
			}
		} else {
			if (prev != WINDOW_CLEAN) { /* End of a streak */
				rc = window_flush(context, offset, count,
						       prev);
				if (rc < 0) {
					return rc;
				}
				offset = 0;
				count = 0;
			}
		}
		prev = cur;
	}

	if (prev != WINDOW_CLEAN) { /* Still the last streak to write */
		rc = window_flush(context, offset, count, prev);
		if (rc < 0) {
			return rc;
		}
	}

	/* Clear the dirty bytemap since we have written back all changes */
	return window_set_bytemap(context, context->current, 0,
				  context->current->size >>
				  context->backend.block_size_shift,
				  WINDOW_CLEAN);
}
323 
324 static int protocol_v1_flush(struct mbox_context *context,
325 			     struct protocol_flush *io)
326 {
327 	int rc;
328 
329 	if (!(context->current && context->current_is_write)) {
330 		MSG_ERR("Tried to call flush without open write window\n");
331 		return -EPERM;
332 	}
333 
334 	/*
335 	 * For V1 the Flush command acts much the same as the dirty command
336 	 * except with a flush as well. Only do this on an actual flush
337 	 * command not when we call flush because we've implicitly closed a
338 	 * window because we might not have the required args in req.
339 	 */
340 	if (io) {
341 		struct protocol_mark_dirty *mdio = (void *)io;
342 		rc = protocol_v1_mark_dirty(context, mdio);
343 		if (rc < 0) {
344 			return rc;
345 		}
346 	}
347 
348 	return generic_flush(context);
349 }
350 
351 static int protocol_v1_close(struct mbox_context *context,
352 			     struct protocol_close *io)
353 {
354 	int rc;
355 
356 	/* Close the current window if there is one */
357 	if (!context->current) {
358 		return 0;
359 	}
360 
361 	/* There is an implicit flush if it was a write window */
362 	if (context->current_is_write) {
363 		rc = protocol_v1_flush(context, NULL);
364 		if (rc < 0) {
365 			MSG_ERR("Couldn't Flush Write Window\n");
366 			return rc;
367 		}
368 	}
369 
370 	/* Host asked for it -> Don't set the BMC Event */
371 	windows_close_current(context, io->req.flags);
372 
373 	return 0;
374 }
375 
376 static int protocol_v1_ack(struct mbox_context *context,
377 			   struct protocol_ack *io)
378 {
379 	return protocol_events_clear(context,
380 				     (io->req.flags & BMC_EVENT_ACK_MASK));
381 }
382 
383 /*
384  * get_suggested_timeout() - get the suggested timeout value in seconds
385  * @context:	The mbox context pointer
386  *
387  * Return:	Suggested timeout in seconds
388  */
389 static uint16_t get_suggested_timeout(struct mbox_context *context)
390 {
391 	struct window_context *window = windows_find_largest(context);
392 	uint32_t max_size_mb = window ? (window->size >> 20) : 0;
393 	uint16_t ret;
394 
395 	ret = align_up(max_size_mb * FLASH_ACCESS_MS_PER_MB, 1000) / 1000;
396 
397 	MSG_DBG("Suggested Timeout: %us, max window size: %uMB, for %dms/MB\n",
398 		ret, max_size_mb, FLASH_ACCESS_MS_PER_MB);
399 	return ret;
400 }
401 
/*
 * protocol_v2_get_info() - Handle the GET_INFO command under protocol v2
 * @context:	The mbox context pointer
 * @io:		GET_INFO request/response parameters
 *
 * Negotiates the protocol version against the host's request. If the
 * negotiated version differs from the current one, context->protocol has
 * been switched and the call is re-dispatched to the handler for the new
 * version. Otherwise this performs the v2-specific initialisation.
 *
 * Return:	0 on success otherwise negative error code
 */
static int protocol_v2_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary*/
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		/* context->protocol was switched by the negotiation above */
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v2 */

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	/* v2 exposes the backend's native block size to the host */
	io->resp.v2.block_size_shift = context->backend.block_size_shift;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift, context->backend.block_size_shift);

	io->resp.v2.timeout = get_suggested_timeout(context);

	return lpc_map_memory(context);
}
436 
437 static int protocol_v2_get_flash_info(struct mbox_context *context,
438 				      struct protocol_get_flash_info *io)
439 {
440 	struct backend *backend = &context->backend;
441 
442 	io->resp.v2.flash_size =
443 		backend->flash_size >> backend->block_size_shift;
444 	io->resp.v2.erase_size =
445 		((1 << backend->erase_size_shift) >> backend->block_size_shift);
446 
447 	return 0;
448 }
449 
450 static int protocol_v2_create_window(struct mbox_context *context,
451 				     struct protocol_create_window *io)
452 {
453 	int rc;
454 
455 	rc = protocol_v1_create_window(context, io);
456 	if (rc < 0)
457 		return rc;
458 
459 	io->resp.size = context->current->size >> context->backend.block_size_shift;
460 	io->resp.offset = context->current->flash_offset >>
461 					context->backend.block_size_shift;
462 
463 	return 0;
464 }
465 
466 static int protocol_v2_mark_dirty(struct mbox_context *context,
467 				  struct protocol_mark_dirty *io)
468 {
469 	if (!(context->current && context->current_is_write)) {
470 		MSG_ERR("Tried to call mark dirty without open write window\n");
471 		return -EPERM;
472 	}
473 
474 	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
475 		 io->req.v2.offset << context->backend.block_size_shift,
476 		 io->req.v2.size << context->backend.block_size_shift);
477 
478 	return window_set_bytemap(context, context->current, io->req.v2.offset,
479 				  io->req.v2.size, WINDOW_DIRTY);
480 }
481 
/*
 * protocol_v2_erase() - Mark a range of the write window as erased
 * @context:	The mbox context pointer
 * @io:		ERASE request parameters (offset/size in block-size units,
 *		window-relative)
 *
 * Marks the blocks erased in the bytemap and fills the corresponding RAM
 * with 0xFF so the window contents match what flash will hold after the
 * erase is flushed.
 *
 * Return:	0 on success otherwise negative error code
 */
static int protocol_v2_erase(struct mbox_context *context,
			     struct protocol_erase *io)
{
	size_t start, len;
	int rc;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call erase without open write window\n");
		return -EPERM;
	}

	MSG_INFO("Erase window @ 0x%.8x for 0x%.8x\n",
		 io->req.offset << context->backend.block_size_shift,
		 io->req.size << context->backend.block_size_shift);

	rc = window_set_bytemap(context, context->current, io->req.offset,
				io->req.size, WINDOW_ERASED);
	if (rc < 0) {
		return rc;
	}

	/* Write 0xFF to mem -> This ensures consistency between flash & ram */
	start = io->req.offset << context->backend.block_size_shift;
	len = io->req.size << context->backend.block_size_shift;
	memset(context->current->mem + start, 0xFF, len);

	return 0;
}
510 
511 static int protocol_v2_flush(struct mbox_context *context,
512 			     struct protocol_flush *io)
513 {
514 	if (!(context->current && context->current_is_write)) {
515 		MSG_ERR("Tried to call flush without open write window\n");
516 		return -EPERM;
517 	}
518 
519 	return generic_flush(context);
520 }
521 
522 static int protocol_v2_close(struct mbox_context *context,
523 			     struct protocol_close *io)
524 {
525 	int rc;
526 
527 	/* Close the current window if there is one */
528 	if (!context->current) {
529 		return 0;
530 	}
531 
532 	/* There is an implicit flush if it was a write window */
533 	if (context->current_is_write) {
534 		rc = protocol_v2_flush(context, NULL);
535 		if (rc < 0) {
536 			MSG_ERR("Couldn't Flush Write Window\n");
537 			return rc;
538 		}
539 	}
540 
541 	/* Host asked for it -> Don't set the BMC Event */
542 	windows_close_current(context, io->req.flags);
543 
544 	return 0;
545 }
546 
/* Command dispatch table for protocol v1; v1 has no ERASE command */
static const struct protocol_ops protocol_ops_v1 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v1_get_info,
	.get_flash_info = protocol_v1_get_flash_info,
	.create_window = protocol_v1_create_window,
	.mark_dirty = protocol_v1_mark_dirty,
	.erase = NULL,
	.flush = protocol_v1_flush,
	.close = protocol_v1_close,
	.ack = protocol_v1_ack,
};
558 
/* Command dispatch table for protocol v2; reset and ack are shared with v1 */
static const struct protocol_ops protocol_ops_v2 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v2_get_info,
	.get_flash_info = protocol_v2_get_flash_info,
	.create_window = protocol_v2_create_window,
	.mark_dirty = protocol_v2_mark_dirty,
	.erase = protocol_v2_erase,
	.flush = protocol_v2_flush,
	.close = protocol_v2_close,
	.ack = protocol_v1_ack,
};
570 
/* Indexed by negotiated protocol version; index 0 is not a valid version */
static const struct protocol_ops *protocol_ops_map[] = {
	[0] = NULL,
	[1] = &protocol_ops_v1,
	[2] = &protocol_ops_v2,
};
576 
577 static int protocol_negotiate_version(struct mbox_context *context,
578 				      uint8_t requested)
579 {
580 	/* Check we support the version requested */
581 	if (requested < API_MIN_VERSION)
582 		return -EINVAL;
583 
584 	context->version = (requested > API_MAX_VERSION) ?
585 				API_MAX_VERSION : requested;
586 
587 	context->protocol = protocol_ops_map[context->version];
588 
589 	return context->version;
590 }
591 
/*
 * protocol_init() - Initialise the protocol layer
 * @context:	The mbox context pointer
 *
 * Starts out speaking the highest supported version; the host may
 * renegotiate later via GET_INFO.
 *
 * Return:	0
 */
int protocol_init(struct mbox_context *context)
{
	protocol_negotiate_version(context, API_MAX_VERSION);

	return 0;
}
598 
/*
 * protocol_free() - Release protocol-layer resources
 * @context:	The mbox context pointer (currently unused)
 *
 * The protocol layer holds no dynamically-allocated state, so this is a
 * no-op kept for symmetry with protocol_init().
 */
void protocol_free(struct mbox_context *context)
{
}
603 
/*
 * __protocol_reset() - Reset windows, backend and LPC mapping
 * @context:	The mbox context pointer
 *
 * Don't do any state manipulation, just perform the reset. BMC event
 * handling around the reset is the caller's job (see protocol_reset()).
 *
 * Return:	0 on success otherwise negative error code
 */
int __protocol_reset(struct mbox_context *context)
{
	enum backend_reset_mode mode;
	int rc;

	windows_reset_all(context);

	rc = backend_reset(&context->backend, context->mem, context->mem_size);
	if (rc < 0)
		return rc;

	/* A non-negative return encodes the LPC mapping the backend wants */
	mode = rc;
	if (!(mode == reset_lpc_flash || mode == reset_lpc_memory))
		return -EINVAL;

	if (mode == reset_lpc_flash)
		return lpc_map_flash(context);

	assert(mode == reset_lpc_memory);
	return lpc_map_memory(context);
}
626 
627 /* Prevent the host from performing actions whilst reset takes place */
628 int protocol_reset(struct mbox_context *context)
629 {
630 	int rc;
631 
632 	rc = protocol_events_clear(context, BMC_EVENT_DAEMON_READY);
633 	if (rc < 0) {
634 		MSG_ERR("Failed to clear daemon ready state, reset failed\n");
635 		return rc;
636 	}
637 
638 	rc = __protocol_reset(context);
639 	if (rc < 0) {
640 		MSG_ERR("Failed to reset protocol, daemon remains not ready\n");
641 		return rc;
642 	}
643 
644 	rc = protocol_events_set(context,
645 			BMC_EVENT_DAEMON_READY | BMC_EVENT_PROTOCOL_RESET);
646 	if (rc < 0) {
647 		MSG_ERR("Failed to set daemon ready state, daemon remains not ready\n");
648 		return rc;
649 	}
650 
651 	return 0;
652 }
653