xref: /openbmc/linux/arch/s390/include/asm/barrier.h (revision b3fd7368f8f60bc9a7ffc2a5311db5f4dbd42180)
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

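/*
 * A BCR with a nonzero mask and general register 0 as the target does not
 * branch, but it does act as a serialization point for the CPU, which is
 * why a single instruction suffices as a full memory barrier here.  The
 * mask-14 form, available from z196 onward, serializes without the
 * checkpoint synchronization performed by the mask-15 form and is
 * therefore cheaper.
 */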
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif

#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)

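/*
 * All of the barrier flavours below are aliases for the same full barrier;
 * none of them is weaker than mb().
 */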
#define rmb()				mb()
#define wmb()				mb()
#define dma_rmb()			rmb()
#define dma_wmb()			wmb()
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()

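/*
 * Loads that carry a data dependency are ordered by the hardware, so the
 * dependency barriers can be no-ops; only DEC Alpha needs a real barrier
 * here.
 */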
#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

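/*
 * Callers use these to give full-barrier semantics to a non-value-returning
 * atomic operation, for example (illustrative, not from this file):
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->refcount);
 *
 * Here they simply expand to a full barrier.
 */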
#define smp_mb__before_atomic()		smp_mb()
#define smp_mb__after_atomic()		smp_mb()

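/*
 * set_mb() stores a value and then orders the store against all later
 * accesses with a full barrier.  A typical caller (illustrative, not from
 * this file) is the sleep/wakeup handshake, where the task state must be
 * visible before the wait condition is re-checked:
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 */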
#define set_mb(var, value)		do { var = value; mb(); } while (0)

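/*
 * smp_store_release() publishes a value so that all of the caller's earlier
 * stores are visible to any CPU that reads that value back with
 * smp_load_acquire().  Illustrative pairing (not from this file):
 *
 *	producer:
 *		obj->data = compute();
 *		smp_store_release(&published, obj);
 *
 *	consumer:
 *		p = smp_load_acquire(&published);
 *		if (p)
 *			use(p->data);
 *
 * Only a compiler barrier is needed on either side here, since the CPU
 * already keeps ordinary loads and stores sufficiently ordered.
 */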
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif /* __ASM_BARRIER_H */