/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
#define _TOOLS_LINUX_ASM_X86_BARRIER_H

/*
 * Copied from the Linux kernel sources, also moving code out of
 * tools/perf/perf-sys.h so that it is located in a place similar
 * to the kernel sources.
 *
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#if defined(__i386__)
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
#define mb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

#if defined(__x86_64__)
#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})
#endif /* defined(__x86_64__) */
#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
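
/*
 * Usage sketch (illustrative only, not part of the original header):
 * because x86-64 is strongly ordered (TSO), the smp_store_release() and
 * smp_load_acquire() macros above compile down to a plain store/load plus
 * a compiler barrier, yet they still express the intended ordering.  A
 * hypothetical single-producer/single-consumer handoff might look like
 * the following, assuming barrier(), READ_ONCE() and WRITE_ONCE() come
 * from the tools copy of linux/compiler.h.  The release store publishes
 * the payload only after it has been written; the acquire load ensures
 * the payload is read only after the flag is observed set.
 *
 *	#include <linux/compiler.h>
 *	#include <asm/barrier.h>
 *
 *	static int payload;
 *	static int ready;
 *
 *	static void producer(int value)
 *	{
 *		payload = value;
 *		smp_store_release(&ready, 1);
 *	}
 *
 *	static int consumer(int *out)
 *	{
 *		if (!smp_load_acquire(&ready))
 *			return 0;
 *		*out = payload;
 *		return 1;
 *	}
 */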