1*83d290c5STom Rini /* SPDX-License-Identifier: GPL-2.0+ */
26e295186SSimon Glass /*
36e295186SSimon Glass * Copyright (c) 2015 Google, Inc
46e295186SSimon Glass */
56e295186SSimon Glass
66e295186SSimon Glass #ifndef __ALIGNMEM_H
76e295186SSimon Glass #define __ALIGNMEM_H
86e295186SSimon Glass
96e295186SSimon Glass /*
106e295186SSimon Glass * ARCH_DMA_MINALIGN is defined in asm/cache.h for each architecture. It
116e295186SSimon Glass * is used to align DMA buffers.
126e295186SSimon Glass */
136e295186SSimon Glass #ifndef __ASSEMBLY__
146e295186SSimon Glass #include <asm/cache.h>
156e295186SSimon Glass #include <malloc.h>
166e295186SSimon Glass
17cf92e05cSSimon Glass /*
18cf92e05cSSimon Glass * The ALLOC_CACHE_ALIGN_BUFFER macro is used to allocate a buffer on the
19cf92e05cSSimon Glass * stack that meets the minimum architecture alignment requirements for DMA.
20cf92e05cSSimon Glass * Such a buffer is useful for DMA operations where flushing and invalidating
21cf92e05cSSimon Glass * the cache before and after a read and/or write operation is required for
22cf92e05cSSimon Glass * correct operations.
23cf92e05cSSimon Glass *
24cf92e05cSSimon Glass * When called the macro creates an array on the stack that is sized such
25cf92e05cSSimon Glass * that:
26cf92e05cSSimon Glass *
27cf92e05cSSimon Glass * 1) The beginning of the array can be advanced enough to be aligned.
28cf92e05cSSimon Glass *
29cf92e05cSSimon Glass * 2) The size of the aligned portion of the array is a multiple of the minimum
30cf92e05cSSimon Glass * architecture alignment required for DMA.
31cf92e05cSSimon Glass *
32cf92e05cSSimon Glass * 3) The aligned portion contains enough space for the original number of
33cf92e05cSSimon Glass * elements requested.
34cf92e05cSSimon Glass *
35cf92e05cSSimon Glass * The macro then creates a pointer to the aligned portion of this array and
36cf92e05cSSimon Glass * assigns to the pointer the address of the first element in the aligned
37cf92e05cSSimon Glass * portion of the array.
38cf92e05cSSimon Glass *
39cf92e05cSSimon Glass * Calling the macro as:
40cf92e05cSSimon Glass *
41cf92e05cSSimon Glass * ALLOC_CACHE_ALIGN_BUFFER(uint32_t, buffer, 1024);
42cf92e05cSSimon Glass *
43cf92e05cSSimon Glass * Will result in something similar to saying:
44cf92e05cSSimon Glass *
45cf92e05cSSimon Glass * uint32_t buffer[1024];
46cf92e05cSSimon Glass *
47cf92e05cSSimon Glass * The following differences exist:
48cf92e05cSSimon Glass *
49cf92e05cSSimon Glass * 1) The resulting buffer is guaranteed to be aligned to the value of
50cf92e05cSSimon Glass * ARCH_DMA_MINALIGN.
51cf92e05cSSimon Glass *
52cf92e05cSSimon Glass * 2) The buffer variable created by the macro is a pointer to the specified
53cf92e05cSSimon Glass * type, and NOT an array of the specified type. This can be very important
54cf92e05cSSimon Glass * if you want the address of the buffer, which you probably do, to pass it
55cf92e05cSSimon Glass * to the DMA hardware. The value of &buffer is different in the two cases.
56cf92e05cSSimon Glass * In the macro case it will be the address of the pointer, not the address
57cf92e05cSSimon Glass * of the space reserved for the buffer. However, in the second case it
58cf92e05cSSimon Glass * would be the address of the buffer. So if you are replacing hard coded
59cf92e05cSSimon Glass * stack buffers with this macro you need to make sure you remove the & from
60cf92e05cSSimon Glass * the locations where you are taking the address of the buffer.
61cf92e05cSSimon Glass *
62cf92e05cSSimon Glass * Note that the size parameter is the number of array elements to allocate,
63cf92e05cSSimon Glass * not the number of bytes.
64cf92e05cSSimon Glass *
65cf92e05cSSimon Glass * This macro can not be used outside of function scope, or for the creation
66cf92e05cSSimon Glass * of a function scoped static buffer. It can not be used to create a cache
67cf92e05cSSimon Glass * line aligned global buffer.
68cf92e05cSSimon Glass */
/* Number of 'pad'-sized chunks needed to hold 's' bytes (rounding up) */
#define PAD_COUNT(s, pad)	(((s) - 1) / (pad) + 1)
/* 's' rounded up to the next multiple of 'pad' */
#define PAD_SIZE(s, pad)	(PAD_COUNT(s, pad) * (pad))
/*
 * Declare raw storage __##name large enough for 'size' elements of 'type',
 * padded to a multiple of 'pad' bytes plus 'align - 1' slack bytes, and a
 * pointer 'name' to the first 'align'-aligned byte inside that storage.
 */
#define ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, pad)		\
	char __##name[ROUND(PAD_SIZE((size) * sizeof(type), pad),	\
			    align) + ((align) - 1)];			\
									\
	type *name = (type *)ALIGN((uintptr_t)__##name, align)
#define ALLOC_ALIGN_BUFFER(type, name, size, align)			\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, 1)
#define ALLOC_CACHE_ALIGN_BUFFER_PAD(type, name, size, pad)		\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, ARCH_DMA_MINALIGN, pad)
#define ALLOC_CACHE_ALIGN_BUFFER(type, name, size)			\
	ALLOC_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
82cf92e05cSSimon Glass
/*
 * DEFINE_CACHE_ALIGN_BUFFER() is similar to ALLOC_CACHE_ALIGN_BUFFER, but its
 * purpose is to allow allocating aligned buffers outside of function scope.
 * Use of this macro should be avoided, or done only with extreme care!
 */
/*
 * Define a file-scope static buffer of 'size' elements of 'type', aligned to
 * 'align' bytes, plus a static pointer 'name' of the element type to it.
 * 'size' is parenthesized so compound expressions expand correctly.
 */
#define DEFINE_ALIGN_BUFFER(type, name, size, align)			\
	static char __##name[ALIGN((size) * sizeof(type), align)]	\
			__aligned(align);				\
									\
	static type *name = (type *)__##name
#define DEFINE_CACHE_ALIGN_BUFFER(type, name, size)			\
	DEFINE_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
95cf92e05cSSimon Glass
96cf92e05cSSimon Glass /**
97cf92e05cSSimon Glass * malloc_cache_aligned() - allocate a memory region aligned to cache line size
98cf92e05cSSimon Glass *
99cf92e05cSSimon Glass * This allocates memory at a cache-line boundary. The amount allocated may
100cf92e05cSSimon Glass * be larger than requested as it is rounded up to the nearest multiple of the
101cf92e05cSSimon Glass * cache-line size. This ensured that subsequent cache operations on this
102cf92e05cSSimon Glass * memory (flush, invalidate) will not affect subsequently allocated regions.
103cf92e05cSSimon Glass *
104cf92e05cSSimon Glass * @size: Minimum number of bytes to allocate
105cf92e05cSSimon Glass *
106cf92e05cSSimon Glass * @return pointer to new memory region, or NULL if there is no more memory
107cf92e05cSSimon Glass * available.
108cf92e05cSSimon Glass */
malloc_cache_aligned(size_t size)1096e295186SSimon Glass static inline void *malloc_cache_aligned(size_t size)
1106e295186SSimon Glass {
1116e295186SSimon Glass return memalign(ARCH_DMA_MINALIGN, ALIGN(size, ARCH_DMA_MINALIGN));
1126e295186SSimon Glass }
1136e295186SSimon Glass #endif
1146e295186SSimon Glass
1156e295186SSimon Glass #endif /* __ALIGNMEM_H */
116