/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory-mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()
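
/*
 * A minimal sketch of the classic producer/consumer pairing of wmb()
 * and rmb() described above; the names "msg", "msg_ready", "publish"
 * and "consume" are purely illustrative, not part of this header.
 *
 *	static int msg;
 *	static int msg_ready;
 *
 *	static void publish(int v)
 *	{
 *		msg = v;
 *		wmb();			(msg is visible before msg_ready)
 *		msg_ready = 1;
 *	}
 *
 *	static int consume(void)
 *	{
 *		while (!msg_ready)
 *			cpu_relax();
 *		rmb();			(msg_ready is read before msg)
 *		return msg;
 *	}
 */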

#define dma_rmb()	mb()
#define dma_wmb()	mb()
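
/*
 * A minimal sketch of how dma_rmb()/dma_wmb() are typically used with
 * a descriptor shared with a DMA-capable device.  The descriptor and
 * the helpers desc_owned_by_cpu(), read_data(), write_data() and
 * give_to_device() are purely illustrative.
 *
 *	if (desc_owned_by_cpu(desc)) {
 *		dma_rmb();		(check ownership before reading data)
 *		read_data(desc);
 *		write_data(desc);
 *		dma_wmb();		(fill the data before handing over)
 *		give_to_device(desc);
 *	}
 */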

#define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
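
/*
 * A minimal sketch of the pattern the __smp_mb__{before,after}_atomic()
 * hooks serve, reached via the generic smp_mb__before_atomic() and
 * smp_mb__after_atomic() wrappers; "obj" and its fields are purely
 * illustrative.
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	(order the store above against the RMW)
 *	atomic_dec(&obj->ref_count);
 */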

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery!
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

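/*
 * A minimal sketch of the message-passing pattern that
 * smp_store_release()/smp_load_acquire() (built on the two macros
 * above) are designed for; "payload" and "flag" are purely
 * illustrative.
 *
 *	writer:				reader:
 *	payload = 42;
 *	smp_store_release(&flag, 1);	while (!smp_load_acquire(&flag))
 *						cpu_relax();
 *					BUG_ON(payload != 42);
 *
 * Per the comment above, the volatile accesses in WRITE_ONCE() and
 * READ_ONCE() already compile to st.rel and ld.acq on ia64, so only a
 * compiler barrier() is needed on top.
 */
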
/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */