xref: /openbmc/linux/arch/um/include/asm/cache.h (revision b2441318)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_CACHE_H
#define __UM_CACHE_H


/*
 * L1_CACHE_SHIFT: log2 of the assumed L1 cache line size for UML.
 * Picked per host architecture config; the value only needs to be a
 * reasonable alignment hint (e.g. for SMP padding), not exact hardware truth.
 */
#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
/* 32-bit x86 host: reuse the shift selected by the x86 Kconfig. */
# define L1_CACHE_SHIFT		(CONFIG_X86_L1_CACHE_SHIFT)
#elif defined(CONFIG_UML_X86) /* 64-bit */
# define L1_CACHE_SHIFT		6 /* Should be 7 on Intel */
#else
/* XXX: this was taken from x86, now it's completely random. Luckily only
 * affects SMP padding. */
# define L1_CACHE_SHIFT		5
#endif

/* Cache line size in bytes, derived from the shift above. */
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#endif
19