/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()		__asm__ __volatile__ ("nop")

/* Emit a RISC-V "fence" instruction with the given predecessor/successor sets. */
#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

/* These barriers need to enforce ordering on both devices and memory. */
#define mb()		RISCV_FENCE(iorw,iorw)
#define rmb()		RISCV_FENCE(ir,ir)
#define wmb()		RISCV_FENCE(ow,ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define smp_mb()	RISCV_FENCE(rw,rw)
#define smp_rmb()	RISCV_FENCE(r,r)
#define smp_wmb()	RISCV_FENCE(w,w)

/*
 * These fences exist to enforce ordering around the relaxed AMOs.  The
 * documentation defines that
 * "
 *     atomic_fetch_add();
 *   is equivalent to:
 *     smp_mb__before_atomic();
 *     atomic_fetch_add_relaxed();
 *     smp_mb__after_atomic();
 * "
 * So we emit full fences on both sides.
 */
#define __smp_mb__before_atomic()	smp_mb()
#define __smp_mb__after_atomic()	smp_mb()

/*
 * These barriers prevent accesses performed outside a spinlock from being
 * moved inside a spinlock.  Since the aq/rl bits RISC-V sets on our
 * spinlocks only enforce release consistency, we need full fences here.
 */
#define smp_mb__before_spinlock()	smp_mb()
#define smp_mb__after_spinlock()	smp_mb()

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */
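
/*
 * Illustrative sketch, not part of the original header: how the
 * smp_wmb()/smp_rmb() pair above is typically used for message passing
 * between two harts.  This is a freestanding userspace analogue that
 * assumes a RISC-V compiler; EX_FENCE, produce() and consume() are
 * hypothetical names used only for this example, mirroring RISCV_FENCE
 * above.  The block is compiled out on purpose.
 */
#if 0
#include <stdbool.h>

#define EX_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

static int payload;
static volatile bool ready;

/* Writer hart: publish the payload, then set the flag. */
static void produce(int v)
{
	payload = v;
	EX_FENCE(w, w);		/* smp_wmb(): payload write before flag write */
	ready = true;
}

/* Reader hart: wait for the flag, then read the payload. */
static int consume(void)
{
	while (!ready)
		;
	EX_FENCE(r, r);		/* smp_rmb(): flag read before payload read */
	return payload;
}
#endif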
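
/*
 * Illustrative sketch, not part of the original header: the fence/AMO
 * sequence described by the "relaxed AMOs" comment above.  An amoadd.w
 * with no aq/rl bits is the relaxed fetch-add; the surrounding
 * "fence rw,rw" instructions are what smp_mb() expands to here.  Assumes
 * a RISC-V compiler; ex_fetch_add_full() is a hypothetical name.  The
 * block is compiled out on purpose.
 */
#if 0
static inline int ex_fetch_add_full(int *p, int inc)
{
	int ret;

	__asm__ __volatile__ (
		"	fence	rw,rw\n"	/* __smp_mb__before_atomic() */
		"	amoadd.w %0, %2, %1\n"	/* atomic_fetch_add_relaxed() */
		"	fence	rw,rw\n"	/* __smp_mb__after_atomic() */
		: "=r" (ret), "+A" (*p)
		: "r" (inc)
		: "memory");
	return ret;
}
#endif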