// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>

addr_aligned(struct bounce_buffer * state)1384d35b28SStephen Warren static int addr_aligned(struct bounce_buffer *state)
14b660df3cSMarek Vasut {
15b660df3cSMarek Vasut const ulong align_mask = ARCH_DMA_MINALIGN - 1;
16b660df3cSMarek Vasut
17b660df3cSMarek Vasut /* Check if start is aligned */
1884d35b28SStephen Warren if ((ulong)state->user_buffer & align_mask) {
1984d35b28SStephen Warren debug("Unaligned buffer address %p\n", state->user_buffer);
20b660df3cSMarek Vasut return 0;
21b660df3cSMarek Vasut }
22b660df3cSMarek Vasut
2384d35b28SStephen Warren /* Check if length is aligned */
2484d35b28SStephen Warren if (state->len != state->len_aligned) {
255d69a5d1SVasili Galka debug("Unaligned buffer length %zu\n", state->len);
26b660df3cSMarek Vasut return 0;
27b660df3cSMarek Vasut }
28b660df3cSMarek Vasut
29b660df3cSMarek Vasut /* Aligned */
30b660df3cSMarek Vasut return 1;
31b660df3cSMarek Vasut }
32b660df3cSMarek Vasut
/**
 * bounce_buffer_start() - prepare a buffer for DMA
 * @state: state to initialize; must later be passed to bounce_buffer_stop()
 * @data:  the caller's (possibly misaligned) data buffer
 * @len:   number of bytes the caller cares about
 * @flags: GEN_BB_READ and/or GEN_BB_WRITE, describing the DMA direction
 *
 * If @data is already suitably aligned it is used as the DMA buffer
 * directly; otherwise a cache-line aligned scratch buffer is allocated
 * and, for GEN_BB_READ (device reads from memory), pre-filled with the
 * caller's data. Either way the buffer is flushed so the DMA engine sees
 * current RAM contents.
 *
 * Return: 0 on success, -ENOMEM if the scratch buffer allocation fails.
 */
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = roundup(len, ARCH_DMA_MINALIGN);
	state->flags = flags;

	if (!addr_aligned(state)) {
		/* Misaligned: substitute an aligned scratch buffer */
		state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -ENOMEM;

		/* Device will read this memory: stage the caller's data */
		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes
	 */
	flush_dcache_range((unsigned long)state->bounce_buffer,
			   (unsigned long)state->bounce_buffer +
			   state->len_aligned);

	return 0;
}
63b660df3cSMarek Vasut
bounce_buffer_stop(struct bounce_buffer * state)6484d35b28SStephen Warren int bounce_buffer_stop(struct bounce_buffer *state)
65b660df3cSMarek Vasut {
6684d35b28SStephen Warren if (state->flags & GEN_BB_WRITE) {
6784d35b28SStephen Warren /* Invalidate cache so that CPU can see any newly DMA'd data */
6884d35b28SStephen Warren invalidate_dcache_range((unsigned long)state->bounce_buffer,
6984d35b28SStephen Warren (unsigned long)(state->bounce_buffer) +
7084d35b28SStephen Warren state->len_aligned);
7184d35b28SStephen Warren }
72b660df3cSMarek Vasut
7384d35b28SStephen Warren if (state->bounce_buffer == state->user_buffer)
74b660df3cSMarek Vasut return 0;
75b660df3cSMarek Vasut
7684d35b28SStephen Warren if (state->flags & GEN_BB_WRITE)
7784d35b28SStephen Warren memcpy(state->user_buffer, state->bounce_buffer, state->len);
78b660df3cSMarek Vasut
7984d35b28SStephen Warren free(state->bounce_buffer);
80b660df3cSMarek Vasut
81b660df3cSMarek Vasut return 0;
82b660df3cSMarek Vasut }
83