xref: /openbmc/linux/arch/ia64/include/asm/barrier.h (revision e2f1cf25)
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()
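
/*
 * Illustrative sketch, not part of the original header: a typical
 * wmb()/rmb() producer/consumer pairing.  The "msg" structure and the
 * producer()/consumer() helpers are hypothetical.
 *
 *	static void producer(struct msg *m, int v)
 *	{
 *		m->data = v;	// write the payload first
 *		wmb();		// payload store visible before the flag store
 *		m->ready = 1;	// then publish the flag
 *	}
 *
 *	static int consumer(struct msg *m)
 *	{
 *		if (!m->ready)	// read the flag first
 *			return -1;
 *		rmb();		// flag load ordered before the payload load
 *		return m->data;
 *	}
 */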

#define dma_rmb()	mb()
#define dma_wmb()	mb()
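
/*
 * Illustrative sketch, not part of the original header: dma_rmb() and
 * dma_wmb() ordering CPU accesses to a coherent DMA descriptor ring,
 * in the style of the example in Documentation/memory-barriers.txt.
 * The descriptor layout and the DEVICE_OWN flag are hypothetical.
 *
 *	if (desc->status & DEVICE_OWN)	// device still owns the descriptor
 *		return;
 *	dma_rmb();			// read status before reading the data
 *	len = desc->len;
 *
 *	desc->addr = new_buf_dma_addr;	// refill the descriptor
 *	dma_wmb();			// write the data before handing it back
 *	desc->status = DEVICE_OWN;
 */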

#ifdef CONFIG_SMP
# define smp_mb()	mb()
#else
# define smp_mb()	barrier()
#endif

#define smp_rmb()	smp_mb()
#define smp_wmb()	smp_mb()

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
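
/*
 * Illustrative sketch, not part of the original header: pairing
 * smp_mb__before_atomic()/smp_mb__after_atomic() with atomic RMW
 * operations that do not return a value.  The obj structure and its
 * fields are hypothetical.
 *
 *	obj->prepared = 1;		// plain store
 *	smp_mb__before_atomic();	// order the store before the RMW
 *	atomic_inc(&obj->refcount);
 *
 *	atomic_dec(&obj->refcount);
 *	smp_mb__after_atomic();		// order the RMW before later accesses
 *	obj->finished = 1;		// plain store
 */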

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery!
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
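
/*
 * Illustrative sketch, not part of the original header: publishing a
 * pointer with smp_store_release() and consuming it with
 * smp_load_acquire().  On ia64 the volatile store becomes st.rel and
 * the volatile load becomes ld.acq, as noted above.  The foo structure
 * and the writer()/reader() helpers are hypothetical.
 *
 *	static struct foo *shared;
 *
 *	static void writer(struct foo *f)
 *	{
 *		f->val = 42;			// initialise before publishing
 *		smp_store_release(&shared, f);	// st.rel publishes the pointer
 *	}
 *
 *	static int reader(void)
 *	{
 *		struct foo *f = smp_load_acquire(&shared);	// ld.acq
 *
 *		return f ? f->val : -1;		// if f is seen, so is f->val
 *	}
 */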

#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
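
/*
 * Illustrative sketch, not part of the original header: smp_store_mb()
 * stores a value and then issues a full barrier, the pattern used by
 * set_current_state() in wait loops.  The waiter structure below is
 * hypothetical.
 *
 *	smp_store_mb(w->state, WAITER_SLEEPING);	// store, then full mb()
 *	if (!w->condition)				// re-check after the barrier
 *		schedule();
 */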

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#endif /* _ASM_IA64_BARRIER_H */