/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline unsigned long __xchg_case_##name(unsigned long x,	\
					       volatile void *ptr)	\
{									\
	unsigned long ret, tmp;						\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb,							\
	/* LSE atomics */						\
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
		__nops(3)						\
	"	" #nop_lse)						\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
	: "r" (x)							\
	: cl);								\
									\
	return ret;							\
}

__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)						\
static inline unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,	\
					int size)		\
{								\
	switch (size) {						\
	case 1:							\
		return __xchg_case##sfx##_1(x, ptr);		\
	case 2:							\
		return __xchg_case##sfx##_2(x, ptr);		\
	case 4:							\
		return __xchg_case##sfx##_4(x, ptr);		\
	case 8:							\
		return __xchg_case##sfx##_8(x, ptr);		\
	default:						\
		BUILD_BUG();					\
	}							\
								\
	unreachable();						\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
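
/*
 * Usage sketch (illustrative only; "owner" below is a hypothetical
 * variable, not something defined by this header).  The wrappers above
 * derive the access size from sizeof(*(ptr)), so callers simply pass an
 * ordinary pointer and the new value:
 *
 *	static unsigned int owner;
 *
 *	unsigned int prev = xchg(&owner, 1U);		// fully ordered
 *	unsigned int old  = xchg_relaxed(&owner, 0U);	// no ordering implied
 *
 * A pointee size other than 1, 2, 4 or 8 bytes hits BUILD_BUG() at compile
 * time via the default case in the __xchg generators.
 */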

#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
	case 2:								\
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
	case 4:								\
		return __cmpxchg_case##sfx##_4(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed

/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})

#define __CMPWAIT_CASE(w, sz, name)					\
static inline void __cmpwait_case_##name(volatile void *ptr,		\
					 unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 1);
__CMPWAIT_CASE(w, h, 2);
__CMPWAIT_CASE(w,  , 4);
__CMPWAIT_CASE( ,  , 8);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_1(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_2(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_4(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_8(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
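
/*
 * Usage sketch (illustrative only; "lockword" below is a hypothetical
 * variable, not something defined by this header).  cmpxchg() returns the
 * value observed at the location, so success is detected by comparing the
 * return value with the expected old value:
 *
 *	static unsigned long lockword;
 *
 *	if (cmpxchg(&lockword, 0UL, 1UL) == 0UL) {
 *		// the word held 0 and we atomically installed 1
 *	}
 *
 * cmpxchg_double(), by contrast, negates the underlying helper's result,
 * so a non-zero return indicates success; its two pointers must reference
 * adjacent 8-byte words, as enforced by __cmpxchg_double_check() above.
 */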

#endif	/* __ASM_CMPXCHG_H */