/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)	__smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)	__smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */
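
/*
 * Usage sketch for the release/acquire pair above (illustrative only; msg,
 * msg_ready, producer() and consumer() are hypothetical names, not part of
 * this header). The producer publishes data with smp_store_release() and
 * the consumer observes the flag with smp_load_acquire(); a consumer that
 * sees msg_ready == 1 is then guaranteed to also see msg == 42:
 *
 *	static int msg;
 *	static int msg_ready;
 *
 *	void producer(void)
 *	{
 *		msg = 42;
 *		smp_store_release(&msg_ready, 1);
 *	}
 *
 *	int consumer(void)
 *	{
 *		if (smp_load_acquire(&msg_ready))
 *			return msg;
 *		return -1;
 *	}
 */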

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order; the additional RMB
 * provides LOAD->LOAD order; together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif

/*
 * pmem_wmb() ensures that all stores for which the modification
 * is written to persistent storage by preceding instructions have
 * updated persistent storage before any data access or data transfer
 * caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
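
/*
 * Usage sketch for the conditional-load helpers (illustrative only; obj,
 * state and STATE_GO are hypothetical names, not part of this header).
 * Inside cond_expr, VAL names the value just loaded from *ptr:
 *
 *	u32 s;
 *
 *	s = smp_cond_load_acquire(&obj->state, VAL == STATE_GO);
 *
 * The spin loop itself uses only READ_ONCE() plus cpu_relax(); ACQUIRE
 * ordering is supplied afterwards by smp_acquire__after_ctrl_dep(). Paired
 * with a writer that publishes STATE_GO via smp_store_release(), stores made
 * before that publication are visible once the call returns.
 */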