/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 */

/*
 * Memory barrier primitives for ARC.
 *
 * Defines the architecture-specific mb()/rmb()/wmb() (ARCv2) or mb()
 * (ARCompact) and then pulls in <asm-generic/barrier.h>, which derives
 * every barrier flavor not defined here (smp_mb(), dma_*mb(), ...).
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifdef CONFIG_ISA_ARCV2

/*
 * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
 * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
 *
 * Explicit barrier provided by DMB instruction
 * - Operand supports fine grained load/store/load+store semantics
 *   (as used below: 1 = loads, 2 = stores, 3 = loads + stores)
 * - Ensures that selected memory operation issued before it will complete
 *   before any subsequent memory operation of same type
 * - DMB guarantees SMP as well as local barrier semantics
 *   (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
 *   UP: barrier(), SMP: smp_*mb == *mb)
 * - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed
 *   in the general case. Plus it only provides full barrier.
 */

#define mb()	asm volatile("dmb 3\n" : : : "memory")	/* full: orders loads and stores */
#define rmb()	asm volatile("dmb 1\n" : : : "memory")	/* read barrier: orders loads */
#define wmb()	asm volatile("dmb 2\n" : : : "memory")	/* write barrier: orders stores */

#else

/*
 * ARCompact based cores (ARC700) only have SYNC instruction which is super
 * heavy weight as it flushes the pipeline as well.
 * There are no real SMP implementations of such cores.
 *
 * Only the full mb() is provided; asm-generic/barrier.h fills in rmb()/wmb()
 * (as mb()) and the smp_* variants.
 */

#define mb()	asm volatile("sync\n" : : : "memory")

#endif

#include <asm-generic/barrier.h>

#endif