/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */

#ifdef __mb
#define mb()	do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_mb
#define dma_mb()	do { kcsan_mb(); __dma_mb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif

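/*
 * Illustrative sketch (not part of this header, names are hypothetical): an
 * architecture that wants the KCSAN-instrumented wrappers above defines the
 * __ prefixed primitives in its own asm/barrier.h before including this
 * file, e.g.
 *
 *	// arch/foo/include/asm/barrier.h (hypothetical)
 *	#define __mb()	asm volatile ("fence" ::: "memory")
 *	#include <asm-generic/barrier.h>
 *
 * The wrapper above then expands mb() to do { kcsan_mb(); __mb(); } while (0),
 * so every full barrier is also reported to KCSAN.
 */
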
/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_mb
#define dma_mb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif

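/*
 * Illustrative sketch, loosely following Documentation/memory-barriers.txt
 * (the device, descriptor and register names are hypothetical): dma_rmb()
 * and dma_wmb() order CPU accesses to memory shared coherently with a
 * DMA-capable device:
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();			// don't read data before owner check
 *		read_data = desc->data;
 *		desc->data = new_data;
 *		dma_wmb();			// publish data before handing over
 *		desc->status = DEVICE_OWN;
 *		writel(DESC_NOTIFY, doorbell);	// MMIO kick, ordered by writel()
 *	}
 */
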
#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

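/*
 * Illustrative sketch (hypothetical object and fields): these hooks back the
 * public smp_mb__{before,after}_atomic() wrappers below, which give a
 * non-value-returning atomic RMW full-barrier semantics, along the lines of
 * Documentation/memory-barriers.txt:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	// order the store above before the RMW
 *	atomic_dec(&obj->ref_count);
 *
 * Architectures whose atomics already imply full ordering can override them
 * with barrier().
 */
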
#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

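/*
 * Illustrative sketch (hypothetical structure): this release/acquire pair
 * backs the public smp_store_release()/smp_load_acquire() used for the usual
 * message-passing pattern:
 *
 *	// producer
 *	msg->payload = compute_payload();
 *	smp_store_release(&msg->ready, 1);
 *
 *	// consumer
 *	if (smp_load_acquire(&msg->ready))
 *		consume(msg->payload);
 *
 * The consumer can never observe ->ready == 1 together with a stale
 * ->payload.
 */
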
#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)

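/*
 * Illustrative sketch (hypothetical ring layout): a guest driver talking to a
 * para-virtualised device uses the virt_* barriers because the "other side"
 * is the hypervisor/host, even when the guest itself is built !CONFIG_SMP:
 *
 *	ring->desc[head] = buf;
 *	virt_store_release(&ring->avail_idx, head + 1);	// publish the descriptor
 *
 * The host pairs this with an acquire-style read of avail_idx before walking
 * the descriptor.
 */
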
/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, aka (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()		smp_rmb()
#endif

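/*
 * Illustrative sketch (hypothetical structure): a plain control-dependency
 * wait can be upgraded to ACQUIRE by placing this barrier after the loop,
 * which is exactly what smp_cond_load_acquire() below does:
 *
 *	while (!READ_ONCE(work->done))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	result = work->result;		// ordered after the ->done load
 */
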
/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions, we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif

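/*
 * Illustrative usage sketch (hypothetical names): spin until another CPU
 * clears ->locked, with no ordering beyond the load itself; @VAL names the
 * freshly loaded value of *ptr inside the condition:
 *
 *	val = smp_cond_load_relaxed(&node->locked, VAL == 0);
 */
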
/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif

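/*
 * Illustrative usage sketch (hypothetical names): wait until an owner pointer
 * is published, then dereference what it protects; the ACQUIRE ordering
 * guarantees that loads after the macro see everything the releasing CPU
 * stored before it set ->owner:
 *
 *	owner = smp_cond_load_acquire(&lock->owner, VAL != NULL);
 */
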
/*
 * pmem_wmb() ensures that all stores to persistent storage issued by
 * preceding instructions have reached persistent storage before any data
 * access or data transfer caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif

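/*
 * Illustrative sketch (the log layout is hypothetical): a pmem-backed driver
 * writes data, fences it into the persistence domain, and only then publishes
 * a commit record:
 *
 *	memcpy_flushcache(pmem_dst, src, len);	// push the data toward pmem
 *	pmem_wmb();				// data durable before the record
 *	memcpy_flushcache(&log->commit_seq, &seq, sizeof(seq));
 */
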
/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
 * this kind of memory access, the CPU may wait for prior accesses to be
 * merged with subsequent ones. In some situations, such waiting hurts
 * performance. io_stop_wc() can be used to prevent write-combining memory
 * accesses before this macro from being merged with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif

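/*
 * Illustrative sketch (hypothetical WC mapping and sizes): keep two batches
 * of write-combining stores from being merged into a single burst:
 *
 *	memcpy_toio(wc_base, batch0, BATCH_SIZE);
 *	io_stop_wc();				// stop combining across this point
 *	memcpy_toio(wc_base + BATCH_SIZE, batch1, BATCH_SIZE);
 */
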
/*
 * Architectures that guarantee an implicit smp_mb() in switch_mm()
 * can override smp_mb__after_switch_mm.
 */
#ifndef smp_mb__after_switch_mm
# define smp_mb__after_switch_mm()	smp_mb()
#endif

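/*
 * Illustrative sketch (simplified, hypothetical caller): context-switch code
 * that must order the mm switch against a later load can write
 *
 *	switch_mm_irqs_off(prev_mm, next_mm, next);
 *	smp_mb__after_switch_mm();
 *	state = READ_ONCE(next_mm->membarrier_state);
 *
 * Architectures whose switch_mm() already implies a full barrier can override
 * the macro and avoid the extra smp_mb().
 */
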
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */