/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()		__asm__ __volatile__ ("nop")

#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

/* These barriers need to enforce ordering on both devices and memory. */
#define mb()		RISCV_FENCE(iorw,iorw)
#define rmb()		RISCV_FENCE(ir,ir)
#define wmb()		RISCV_FENCE(ow,ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb()	RISCV_FENCE(rw,rw)
#define __smp_rmb()	RISCV_FENCE(r,r)
#define __smp_wmb()	RISCV_FENCE(w,w)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(rw,w);						\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(r,rw);						\
	___p1;								\
})

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler.  See include/linux/spinlock.h for the
 * two orderings it guarantees, but the "critical section is RCsc" guarantee
 * mandates a barrier on RISC-V.  The sequence looks like:
 *
 *    lr.aq lock
 *    sc    lock <= LOCKED
 *    smp_mb__after_spinlock()
 *    // critical section
 *    lr    lock
 *    sc.rl lock <= UNLOCKED
 *
 * The AQ/RL pair provides an RCpc critical section, but there's not really
 * any way we can take advantage of that here because the ordering is only
 * enforced on that one lock.  Thus, we're just doing a full fence.
 */
#define smp_mb__after_spinlock()	RISCV_FENCE(rw,rw)

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */
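
/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * canonical message-passing pattern that __smp_store_release() and
 * __smp_load_acquire() exist to support.  The variable and function names
 * below (msg, ready, producer, consumer) are hypothetical, chosen purely
 * for exposition.
 *
 *	static int msg;
 *	static int ready;
 *
 *	void producer(void)
 *	{
 *		msg = 42;
 *		// "fence rw,w" orders the msg store before the flag store
 *		__smp_store_release(&ready, 1);
 *	}
 *
 *	void consumer(void)
 *	{
 *		// "fence r,rw" orders the flag load before the msg load
 *		while (!__smp_load_acquire(&ready))
 *			cpu_relax();
 *		BUG_ON(msg != 42);	// must observe the producer's store
 *	}
 *
 * Once the consumer observes ready == 1, the release fence on the producer
 * side and the acquire fence on the consumer side together guarantee that
 * the store to msg is visible, with no full barrier required.
 */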