/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif

#ifndef read_barrier_depends
#define read_barrier_depends()	do { } while (0)
#endif

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifndef __smp_read_barrier_depends
#define __smp_read_barrier_depends()	read_barrier_depends()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	__smp_read_barrier_depends()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	do { } while (0)
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Because C lacks lambda expressions, the value of *ptr is loaded into the
 * pre-named variable @VAL, which can then be used in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
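
/*
 * Usage sketch (illustrative only, hypothetical names; not part of this
 * header): pairing smp_store_release() with smp_cond_load_acquire()
 * gives the classic message-passing pattern. The release store publishes
 * data before flag becomes visible; the acquire side spins until it
 * observes flag and is then guaranteed to observe the published data.
 *
 *	static int data, flag;
 *
 *	static void producer(void)
 *	{
 *		WRITE_ONCE(data, 42);
 *		smp_store_release(&flag, 1);	// orders the store to data
 *						// before the store to flag
 *	}
 *
 *	static void consumer(void)
 *	{
 *		// spin until flag is set, then apply ACQUIRE ordering
 *		smp_cond_load_acquire(&flag, VAL != 0);
 *		BUG_ON(READ_ONCE(data) != 42);	// guaranteed to see 42
 *	}
 */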
/*
 * pmem_wmb() ensures that all stores to persistent storage made by
 * preceding instructions have updated persistent storage before any
 * data access or data transfer caused by subsequent instructions is
 * initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */