1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 * Copyright (C) 2012 Regents of the University of California 5 * Copyright (C) 2017 SiFive 6 */ 7 8 #ifndef _ASM_RISCV_ATOMIC_H 9 #define _ASM_RISCV_ATOMIC_H 10 11 #ifdef CONFIG_GENERIC_ATOMIC64 12 # include <asm-generic/atomic64.h> 13 #else 14 # if (__riscv_xlen < 64) 15 # error "64-bit atomics require XLEN to be at least 64" 16 # endif 17 #endif 18 19 #include <asm/cmpxchg.h> 20 #include <asm/barrier.h> 21 22 #define ATOMIC_INIT(i) { (i) } 23 24 #define __atomic_acquire_fence() \ 25 __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") 26 27 #define __atomic_release_fence() \ 28 __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); 29 30 static __always_inline int atomic_read(const atomic_t *v) 31 { 32 return READ_ONCE(v->counter); 33 } 34 static __always_inline void atomic_set(atomic_t *v, int i) 35 { 36 WRITE_ONCE(v->counter, i); 37 } 38 39 #ifndef CONFIG_GENERIC_ATOMIC64 40 #define ATOMIC64_INIT(i) { (i) } 41 static __always_inline long atomic64_read(const atomic64_t *v) 42 { 43 return READ_ONCE(v->counter); 44 } 45 static __always_inline void atomic64_set(atomic64_t *v, long i) 46 { 47 WRITE_ONCE(v->counter, i); 48 } 49 #endif 50 51 /* 52 * First, the atomic ops that have no ordering constraints and therefor don't 53 * have the AQ or RL bits set. These don't return anything, so there's only 54 * one version to worry about. 55 */ 56 #define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \ 57 static __always_inline \ 58 void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \ 59 { \ 60 __asm__ __volatile__ ( \ 61 " amo" #asm_op "." #asm_type " zero, %1, %0" \ 62 : "+A" (v->counter) \ 63 : "r" (I) \ 64 : "memory"); \ 65 } \ 66 67 #ifdef CONFIG_GENERIC_ATOMIC64 68 #define ATOMIC_OPS(op, asm_op, I) \ 69 ATOMIC_OP (op, asm_op, I, w, int, ) 70 #else 71 #define ATOMIC_OPS(op, asm_op, I) \ 72 ATOMIC_OP (op, asm_op, I, w, int, ) \ 73 ATOMIC_OP (op, asm_op, I, d, long, 64) 74 #endif 75 76 ATOMIC_OPS(add, add, i) 77 ATOMIC_OPS(sub, add, -i) 78 ATOMIC_OPS(and, and, i) 79 ATOMIC_OPS( or, or, i) 80 ATOMIC_OPS(xor, xor, i) 81 82 #undef ATOMIC_OP 83 #undef ATOMIC_OPS 84 85 /* 86 * Atomic ops that have ordered, relaxed, acquire, and release variants. 87 * There's two flavors of these: the arithmatic ops have both fetch and return 88 * versions, while the logical ops only have fetch versions. 89 */ 90 #define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \ 91 static __always_inline \ 92 c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \ 93 atomic##prefix##_t *v) \ 94 { \ 95 register c_type ret; \ 96 __asm__ __volatile__ ( \ 97 " amo" #asm_op "." #asm_type " %1, %2, %0" \ 98 : "+A" (v->counter), "=r" (ret) \ 99 : "r" (I) \ 100 : "memory"); \ 101 return ret; \ 102 } \ 103 static __always_inline \ 104 c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \ 105 { \ 106 register c_type ret; \ 107 __asm__ __volatile__ ( \ 108 " amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \ 109 : "+A" (v->counter), "=r" (ret) \ 110 : "r" (I) \ 111 : "memory"); \ 112 return ret; \ 113 } 114 115 #define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \ 116 static __always_inline \ 117 c_type atomic##prefix##_##op##_return_relaxed(c_type i, \ 118 atomic##prefix##_t *v) \ 119 { \ 120 return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \ 121 } \ 122 static __always_inline \ 123 c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \ 124 { \ 125 return atomic##prefix##_fetch_##op(i, v) c_op I; \ 126 } 127 128 #ifdef CONFIG_GENERIC_ATOMIC64 129 #define ATOMIC_OPS(op, asm_op, c_op, I) \ 130 ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \ 131 ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) 132 #else 133 #define ATOMIC_OPS(op, asm_op, c_op, I) \ 134 ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \ 135 ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \ 136 ATOMIC_FETCH_OP( op, asm_op, I, d, long, 64) \ 137 ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64) 138 #endif 139 140 ATOMIC_OPS(add, add, +, i) 141 ATOMIC_OPS(sub, add, +, -i) 142 143 #define atomic_add_return_relaxed atomic_add_return_relaxed 144 #define atomic_sub_return_relaxed atomic_sub_return_relaxed 145 #define atomic_add_return atomic_add_return 146 #define atomic_sub_return atomic_sub_return 147 148 #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed 149 #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed 150 #define atomic_fetch_add atomic_fetch_add 151 #define atomic_fetch_sub atomic_fetch_sub 152 153 #ifndef CONFIG_GENERIC_ATOMIC64 154 #define atomic64_add_return_relaxed atomic64_add_return_relaxed 155 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed 156 #define atomic64_add_return atomic64_add_return 157 #define atomic64_sub_return atomic64_sub_return 158 159 #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed 160 #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed 161 #define atomic64_fetch_add atomic64_fetch_add 162 #define atomic64_fetch_sub atomic64_fetch_sub 163 #endif 164 165 #undef ATOMIC_OPS 166 167 #ifdef CONFIG_GENERIC_ATOMIC64 168 #define ATOMIC_OPS(op, asm_op, I) \ 169 ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) 170 #else 171 #define ATOMIC_OPS(op, asm_op, I) \ 172 ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \ 173 ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64) 174 #endif 175 176 ATOMIC_OPS(and, and, i) 177 ATOMIC_OPS( or, or, i) 178 ATOMIC_OPS(xor, xor, i) 179 180 #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed 181 #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed 182 #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed 183 #define atomic_fetch_and atomic_fetch_and 184 #define atomic_fetch_or atomic_fetch_or 185 #define atomic_fetch_xor atomic_fetch_xor 186 187 #ifndef CONFIG_GENERIC_ATOMIC64 188 #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed 189 #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed 190 #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed 191 #define atomic64_fetch_and atomic64_fetch_and 192 #define atomic64_fetch_or atomic64_fetch_or 193 #define atomic64_fetch_xor atomic64_fetch_xor 194 #endif 195 196 #undef ATOMIC_OPS 197 198 #undef ATOMIC_FETCH_OP 199 #undef ATOMIC_OP_RETURN 200 201 /* This is required to provide a full barrier on success. */ 202 static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 203 { 204 int prev, rc; 205 206 __asm__ __volatile__ ( 207 "0: lr.w %[p], %[c]\n" 208 " beq %[p], %[u], 1f\n" 209 " add %[rc], %[p], %[a]\n" 210 " sc.w.rl %[rc], %[rc], %[c]\n" 211 " bnez %[rc], 0b\n" 212 " fence rw, rw\n" 213 "1:\n" 214 : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) 215 : [a]"r" (a), [u]"r" (u) 216 : "memory"); 217 return prev; 218 } 219 #define atomic_fetch_add_unless atomic_fetch_add_unless 220 221 #ifndef CONFIG_GENERIC_ATOMIC64 222 static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u) 223 { 224 long prev, rc; 225 226 __asm__ __volatile__ ( 227 "0: lr.d %[p], %[c]\n" 228 " beq %[p], %[u], 1f\n" 229 " add %[rc], %[p], %[a]\n" 230 " sc.d.rl %[rc], %[rc], %[c]\n" 231 " bnez %[rc], 0b\n" 232 " fence rw, rw\n" 233 "1:\n" 234 : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) 235 : [a]"r" (a), [u]"r" (u) 236 : "memory"); 237 return prev; 238 } 239 #define atomic64_fetch_add_unless atomic64_fetch_add_unless 240 #endif 241 242 /* 243 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as 244 * {cmp,}xchg and the operations that return, so they need a full barrier. 245 */ 246 #define ATOMIC_OP(c_t, prefix, size) \ 247 static __always_inline \ 248 c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \ 249 { \ 250 return __xchg_relaxed(&(v->counter), n, size); \ 251 } \ 252 static __always_inline \ 253 c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \ 254 { \ 255 return __xchg_acquire(&(v->counter), n, size); \ 256 } \ 257 static __always_inline \ 258 c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \ 259 { \ 260 return __xchg_release(&(v->counter), n, size); \ 261 } \ 262 static __always_inline \ 263 c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \ 264 { \ 265 return __xchg(&(v->counter), n, size); \ 266 } \ 267 static __always_inline \ 268 c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \ 269 c_t o, c_t n) \ 270 { \ 271 return __cmpxchg_relaxed(&(v->counter), o, n, size); \ 272 } \ 273 static __always_inline \ 274 c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \ 275 c_t o, c_t n) \ 276 { \ 277 return __cmpxchg_acquire(&(v->counter), o, n, size); \ 278 } \ 279 static __always_inline \ 280 c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \ 281 c_t o, c_t n) \ 282 { \ 283 return __cmpxchg_release(&(v->counter), o, n, size); \ 284 } \ 285 static __always_inline \ 286 c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \ 287 { \ 288 return __cmpxchg(&(v->counter), o, n, size); \ 289 } 290 291 #ifdef CONFIG_GENERIC_ATOMIC64 292 #define ATOMIC_OPS() \ 293 ATOMIC_OP( int, , 4) 294 #else 295 #define ATOMIC_OPS() \ 296 ATOMIC_OP( int, , 4) \ 297 ATOMIC_OP(long, 64, 8) 298 #endif 299 300 ATOMIC_OPS() 301 302 #define atomic_xchg_relaxed atomic_xchg_relaxed 303 #define atomic_xchg_acquire atomic_xchg_acquire 304 #define atomic_xchg_release atomic_xchg_release 305 #define atomic_xchg atomic_xchg 306 #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed 307 #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire 308 #define atomic_cmpxchg_release atomic_cmpxchg_release 309 #define atomic_cmpxchg atomic_cmpxchg 310 311 #undef ATOMIC_OPS 312 #undef ATOMIC_OP 313 314 static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset) 315 { 316 int prev, rc; 317 318 __asm__ __volatile__ ( 319 "0: lr.w %[p], %[c]\n" 320 " sub %[rc], %[p], %[o]\n" 321 " bltz %[rc], 1f\n" 322 " sc.w.rl %[rc], %[rc], %[c]\n" 323 " bnez %[rc], 0b\n" 324 " fence rw, rw\n" 325 "1:\n" 326 : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) 327 : [o]"r" (offset) 328 : "memory"); 329 return prev - offset; 330 } 331 332 #define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1) 333 334 #ifndef CONFIG_GENERIC_ATOMIC64 335 static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset) 336 { 337 long prev, rc; 338 339 __asm__ __volatile__ ( 340 "0: lr.d %[p], %[c]\n" 341 " sub %[rc], %[p], %[o]\n" 342 " bltz %[rc], 1f\n" 343 " sc.d.rl %[rc], %[rc], %[c]\n" 344 " bnez %[rc], 0b\n" 345 " fence rw, rw\n" 346 "1:\n" 347 : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) 348 : [o]"r" (offset) 349 : "memory"); 350 return prev - offset; 351 } 352 353 #define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1) 354 #endif 355 356 #endif /* _ASM_RISCV_ATOMIC_H */ 357