xref: /openbmc/linux/arch/ia64/include/asm/barrier.h (revision bc5aa3a0)
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores, i.e., all following stores become visible
 *		only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#define dma_rmb()	mb()
#define dma_wmb()	mb()

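/*
 * Illustrative sketch (not part of the original header; "data" and
 * "flag" are hypothetical variables): the classic producer/consumer
 * pairing of wmb() and rmb().
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	----------------		----------------
 *	WRITE_ONCE(data, 42);		while (!READ_ONCE(flag))
 *	wmb();					cpu_relax();
 *	WRITE_ONCE(flag, 1);		rmb();
 *					r = READ_ONCE(data);  (r == 42)
 *
 * The wmb() orders the data store before the flag store; the rmb()
 * orders the flag read before the data read, so a consumer that sees
 * flag == 1 is guaranteed to see data == 42.
 */
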
#define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

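/*
 * Usage sketch (the "obj" structure and its fields are hypothetical):
 * the generic smp_mb__before_atomic()/smp_mb__after_atomic() wrappers,
 * provided via asm-generic/barrier.h below, map onto the compiler
 * barriers above, presumably because ia64's atomic read-modify-write
 * instructions carry ordering semantics of their own (.acq/.rel
 * completers) and need no extra fence instruction.
 *
 *	obj->ready = 1;			(store before the atomic op)
 *	smp_mb__before_atomic();	(compiler barrier only on ia64)
 *	atomic_inc(&obj->refcount);
 */
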
/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery!
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

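/*
 * Illustrative sketch (the "msg" structure is hypothetical): a
 * release/acquire message-passing pair built on the macros above.  Per
 * the comment above, the volatile accesses compile to st.rel and
 * ld.acq on ia64, so no separate fence instruction is emitted.
 *
 *	writer				reader
 *	------				------
 *	msg.payload = val;		if (smp_load_acquire(&msg.seq))
 *	smp_store_release(&msg.seq, 1);		use(msg.payload);
 *
 * A reader that observes msg.seq == 1 through the acquiring load is
 * guaranteed to observe the payload written before the releasing store.
 */
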
/*
 * The group barriers in front of the rsm & ssm are necessary to ensure
 * that none of the previous instructions in the same instruction group
 * are affected by the rsm/ssm.
 */

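/*
 * Sketch of the pattern that comment describes (an assumption about
 * the rsm/ssm wrappers, which live in the ia64 intrinsics headers
 * rather than in this file): the ";;" stop in front of rsm closes the
 * current instruction group, so no earlier instruction can be bundled
 * into the same group and be affected by the psr change.
 *
 *	asm volatile (";; rsm psr.i;;" ::: "memory");
 */
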
#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */