/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

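/*
 * Usage sketch (illustrative only; "len" and "buf" are hypothetical):
 * L1_CACHE_ALIGN() rounds a byte count up to the next L1 cacheline
 * boundary. Assuming L1_CACHE_BYTES is 64, L1_CACHE_ALIGN(100) yields
 * 128, so the buffer's tail does not share a cacheline with whatever
 * is allocated after it:
 *
 *	size_t len = L1_CACHE_ALIGN(100);
 *	void *buf = kmalloc(len, GFP_KERNEL);
 */
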
#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is read
 * frequently in hot paths; performance traces can help decide when to use
 * it. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path sit next to
 * each other, reducing the number of cachelines needed to execute a
 * critical path. Be mindful and selective in its use, i.e. if you are
 * going to use it, please supply a *good* justification in your commit
 * log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif

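/*
 * Usage sketch ("sysctl_foo_limit" is a hypothetical variable): a
 * tunable written once at boot but read on every pass through a hot
 * path is a typical candidate; packing such variables together keeps
 * their cachelines clean and shared across CPUs.
 *
 *	static unsigned int sysctl_foo_limit __read_mostly = 128;
 */
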
/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __section(".data..ro_after_init")
#endif

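/*
 * Usage sketch (hypothetical names): a flag computed once during boot
 * and never changed afterwards can be marked __ro_after_init; the last
 * legal write must happen before mark_rodata_ro() runs.
 *
 *	static bool foo_enabled __ro_after_init;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_enabled = true;
 *		return 0;
 *	}
 */
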
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

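/*
 * Usage sketch ("foo_ring" is hypothetical): the ____ variants only
 * align, so they can be applied to struct members. Giving a producer
 * index and a consumer index their own cachelines on SMP avoids false
 * sharing between the CPUs updating each side.
 *
 *	struct foo_ring {
 *		unsigned int head ____cacheline_aligned_in_smp;
 *		unsigned int tail ____cacheline_aligned_in_smp;
 *	};
 */
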
#ifndef __cacheline_aligned
#define __cacheline_aligned					\
  __attribute__((__aligned__(SMP_CACHE_BYTES),			\
		 __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

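/*
 * Usage sketch (hypothetical variable): unlike the ____ variants above,
 * __cacheline_aligned also places the object in the
 * .data..cacheline_aligned section, so it is intended for static/global
 * data rather than struct members.
 *
 *	static unsigned long foo_counters[4] __cacheline_aligned_in_smp;
 */
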
/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes, L3 cacheline
 * size, etc. Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif

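/*
 * Usage sketch ("foo_shared" is hypothetical): on architectures that
 * define INTERNODE_CACHE_SHIFT (e.g. to the L3 or inter-node line
 * size), this alignment can exceed a single L1 line, keeping data
 * shared between NUMA nodes on its own internode cacheline.
 *
 *	struct foo_shared {
 *		atomic_t refcount;
 *	} ____cacheline_internodealigned_in_smp;
 */
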
#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size()	L1_CACHE_BYTES
#endif

/*
 * Helper to add padding within a struct, to ensure that the fields on
 * either side of it fall into separate cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)		struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif

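/*
 * Usage sketch ("foo_counters" is hypothetical): CACHELINE_PADDING()
 * adds a zero-size, internode-aligned marker, so the field groups on
 * either side land on separate cachelines under SMP while the marker
 * compiles away entirely on UP builds.
 *
 *	struct foo_counters {
 *		atomic_long_t reads;
 *		CACHELINE_PADDING(_pad1_);
 *		atomic_long_t writes;
 *	};
 */
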
#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif

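/*
 * Usage sketch ("foo_dma_buf" is hypothetical): on architectures with
 * non-coherent DMA, ARCH_DMA_MINALIGN is the minimum alignment that
 * keeps cache maintenance on a DMA buffer from clobbering unrelated
 * neighbouring data.
 *
 *	static char foo_dma_buf[64] __aligned(ARCH_DMA_MINALIGN);
 */
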
#endif /* __LINUX_CACHE_H */