/*
 * arch/powerpc/include/asm/barrier.h
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores from being reordered across this point.
 * rmb() prevents loads from being reordered across this point.
 * wmb() prevents stores from being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads from being
 *	reordered across this point (nop on PPC).
 *
 * The *mb() variants without the smp_ prefix must order all types of
 * memory operations with one another; sync is the only instruction
 * sufficient to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only.  We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  The lwsync
 * instruction can be used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so on those CPUs smp_wmb() uses the lighter-weight
 * eieio instead.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
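
/*
 * Illustrative sketch, not part of this header: the non-smp_ barriers
 * must use sync because it is the only instruction that orders
 * cacheable accesses against non-cacheable (I/O) accesses.  The buffer
 * and device register names below are hypothetical.
 *
 *	buf->len = len;			<- cacheable store to memory
 *	wmb();				<- sync: orders across both domains
 *	out_be32(&regs->doorbell, 1);	<- non-cacheable MMIO store
 */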

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : : "memory")
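
/*
 * Illustrative sketch, not part of this header: dma_wmb() orders writes
 * to a coherent DMA descriptor before the write that hands it to the
 * device.  The descriptor layout and DESC_VALID flag are hypothetical.
 *
 *	desc->addr  = cpu_to_le64(dma_addr);
 *	desc->len   = cpu_to_le32(len);
 *	dma_wmb();
 *	desc->flags = cpu_to_le32(DESC_VALID);	<- device may now use it
 */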

#define __smp_lwsync()	__lwsync()

#define __smp_mb()	mb()
#define __smp_rmb()	__lwsync()
#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : : "memory")
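
/*
 * Illustrative sketch, not part of this header: smp_wmb() on a producer
 * pairs with smp_rmb() on a consumer.  The data/flag variables are
 * hypothetical.
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(data, 42);		while (!READ_ONCE(flag))
 *	smp_wmb();				;
 *	WRITE_ONCE(flag, 1);		smp_rmb();
 *					r = READ_ONCE(data);	<- sees 42
 */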

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory")
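
/*
 * Illustrative sketch, not part of this header: data_barrier() keeps
 * later instructions from starting until a loaded value is known.  The
 * register pointer and ERR_BIT flag are hypothetical.
 *
 *	status = in_be32(&regs->status);
 *	data_barrier(status);		<- wait until status is resolved
 *	if (status & ERR_BIT)
 *		handle_error();
 */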

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	___p1;								\
})
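
/*
 * Illustrative sketch, not part of this header: release/acquire message
 * passing.  On SMP builds the generic smp_store_release() and
 * smp_load_acquire() wrappers from <asm-generic/barrier.h> resolve to
 * the __smp_ versions above.  The msg/ready variables are hypothetical.
 *
 *	CPU 0				CPU 1
 *	msg = 42;			while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		;
 *					r = msg;	<- guaranteed to see 42
 */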

/*
 * This must resolve to hwsync on SMP for the context switch path.
 * See _switch, and the core scheduler context switch memory ordering
 * comments.
 */
#define smp_mb__before_spinlock()   smp_mb()

#include <asm-generic/barrier.h>

#endif /* _ASM_POWERPC_BARRIER_H */