/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  arch/arm/include/asm/cache.h
 */
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H

#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
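/*
 * Worked example (illustrative, not part of the original source): the
 * line size is configuration-driven. With CONFIG_ARM_L1_CACHE_SHIFT=6,
 * a common value on ARMv7 parts:
 *
 *	L1_CACHE_BYTES = (1 << 6) = 64 bytes per L1 cache line
 */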

/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
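/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * helper that relies on the guarantee above. Because kmalloc() honours
 * ARCH_DMA_MINALIGN, the buffer starts on a cache-line boundary and no
 * unrelated data can share its cache lines during the DMA transfer.
 *
 *	static void *rx_buf_alloc(struct device *dev, size_t len,
 *				  dma_addr_t *handle)
 *	{
 *		void *buf = kmalloc(len, GFP_KERNEL);
 *
 *		if (!buf)
 *			return NULL;
 *
 *		*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		if (dma_mapping_error(dev, *handle)) {
 *			kfree(buf);
 *			return NULL;
 *		}
 *		return buf;
 *	}
 */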

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif
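/*
 * Illustrative sketch, not from the original source: EABI gives 64-bit
 * types natural (8-byte) alignment, so a slab-allocated object such as
 * the hypothetical one below must never start on an odd 4-byte boundary:
 *
 *	struct stats {
 *		u64 packets;	// EABI: requires 8-byte alignment
 *	};
 *
 *	struct stats *s = kmalloc(sizeof(*s), GFP_KERNEL);
 *	// ARCH_SLAB_MINALIGN == 8 guarantees alignment suitable for
 *	// ldrd/strd accesses to the u64 member.
 */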

#define __read_mostly __section(".data..read_mostly")
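/*
 * Illustrative usage (the variable is hypothetical): __read_mostly
 * groups data that is written rarely but read on hot paths into the
 * .data..read_mostly section, keeping it off cache lines that are
 * frequently written:
 *
 *	static int debug_level __read_mostly;
 */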

#endif