/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#else
#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
#endif

static __always_inline void bcr_serialize(void)
{
	asm volatile(__ASM_BCR_SERIALIZE : : : "memory");
}

#define mb()			bcr_serialize()
#define rmb()			barrier()
#define wmb()			barrier()
#define dma_rmb()		mb()
#define dma_wmb()		mb()
#define __smp_mb()		mb()
#define __smp_rmb()		rmb()
#define __smp_wmb()		wmb()

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/**
 * array_index_mask_nospec - generate a mask for array_idx() that is
 * ~0UL when the bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	if (__builtin_constant_p(size) && size > 0) {
		/*
		 * slbgr turns the borrow from "index <= size - 1" into
		 * ~0UL (in bounds) or 0, with no branch to speculate past.
		 */
		asm("	clgr	%2,%1\n"
		    "	slbgr	%0,%0\n"
		    : "=d" (mask) : "d" (size - 1), "d" (index) : "cc");
		return mask;
	}
	/*
	 * Here the borrow from "size <= index" gives ~0UL when out of
	 * bounds; invert it so in-bounds indexes yield ~0UL.
	 */
	asm("	clgr	%1,%2\n"
	    "	slbgr	%0,%0\n"
	    : "=d" (mask) : "d" (size), "d" (index) : "cc");
	return ~mask;
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */
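As a usage note, here is a minimal message-passing sketch built on the generic smp_store_release()/smp_load_acquire() wrappers that asm-generic/barrier.h layers over the __smp_* definitions above; the msg_data/msg_ready variables and produce()/consume() helpers are hypothetical and do not appear in the header. On s390 both macros compile down to plain accesses plus a compiler barrier, since the hardware is already strongly ordered, but callers still need them for the portable ordering contract:

#include <asm/barrier.h>

static int msg_data;
static int msg_ready;

static void produce(void)
{
	msg_data = 42;				/* write payload first */
	smp_store_release(&msg_ready, 1);	/* then publish the flag */
}

static int consume(void)
{
	/*
	 * The acquire pairs with the release: if the flag is seen,
	 * the payload written before it is guaranteed to be visible.
	 */
	if (smp_load_acquire(&msg_ready))
		return msg_data;
	return -1;	/* not ready yet */
}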
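Similarly, a sketch of how the mask from array_index_mask_nospec() is typically consumed, modeled on the array_index_nospec() helper in <linux/nospec.h>, which is the in-tree consumer of this function; the table array and table_read() function are illustrative only:

#include <linux/kernel.h>
#include <linux/nospec.h>

static int table[16];

static int table_read(unsigned long idx)
{
	if (idx >= ARRAY_SIZE(table))
		return -1;
	/*
	 * Even if the CPU speculates past the bounds check above,
	 * the branch-free mask clamps idx to 0, so no attacker-chosen
	 * out-of-bounds value can steer a speculative load.
	 */
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	return table[idx];
}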