xref: /openbmc/linux/arch/ia64/include/asm/cache.h (revision cbecf716ca618fd44feda6bd9a64a8179d031fc5)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_CACHE_H
#define _ASM_IA64_CACHE_H


/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/* Bytes per L1 (data) cache line.  */
#define L1_CACHE_SHIFT		CONFIG_IA64_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#ifdef CONFIG_SMP
# define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
# define SMP_CACHE_BYTES	L1_CACHE_BYTES
#else
  /*
   * The "aligned" directive can only _increase_ alignment, so this is
   * safe and provides an easy way to avoid wasting space on a
   * uni-processor:
   */
# define SMP_CACHE_SHIFT	3
# define SMP_CACHE_BYTES	(1 << 3)
#endif
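
/*
 * Illustrative sketch only (not part of the original header): a structure
 * padded out to SMP_CACHE_BYTES so that two instances never share a cache
 * line on SMP, while the 8-byte value used on a uni-processor keeps the
 * padding cost low.  The struct and field names are hypothetical.
 */
struct example_per_cpu_stat {
	unsigned long hits;	/* frequently updated by a single CPU */
} __attribute__((aligned(SMP_CACHE_BYTES)));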

#define __read_mostly __section(".data..read_mostly")
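
/*
 * Illustrative sketch only (not part of the original header): tagging a
 * rarely-written variable with __read_mostly places it in the
 * .data..read_mostly section, keeping it away from cache lines that are
 * written frequently.  The variable name is hypothetical.
 */
static int example_feature_enabled __read_mostly;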

#endif /* _ASM_IA64_CACHE_H */