/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
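
/*
 * Illustrative note: RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER come
 * from <asm/fence.h>; they are expected to expand to plain fence
 * instructions, roughly:
 *
 *	__atomic_acquire_fence()  ->  fence r , rw
 *	__atomic_release_fence()  ->  fence rw, w
 *
 * Treat the expansions above as a sketch; the authoritative spellings are
 * the barrier macros themselves.
 */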

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif
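
/*
 * Minimal usage sketch (the counter name below is hypothetical):
 *
 *	static atomic64_t nr_events = ATOMIC64_INIT(0);
 *
 *	arch_atomic64_set(&nr_events, 0);
 *	pr_info("events: %lld\n", arch_atomic64_read(&nr_events));
 *
 * read/set are plain READ_ONCE()/WRITE_ONCE() accesses and carry no
 * ordering beyond that of a relaxed atomic access.
 */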

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}									\

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )				\
	ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
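
/*
 * For illustration, the 32-bit flavour generated by ATOMIC_OPS(add, add, i)
 * above is expected to expand to roughly the following; the destination
 * register is hard-wired to zero because the result is discarded:
 *
 *	static __always_inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * There is no amosub instruction, so arch_atomic_sub() reuses amoadd with a
 * negated operand (-i).
 */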

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i,		\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i,		\
					      atomic##prefix##_t *v)	\
{									\
	return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return arch_atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
	ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
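
/*
 * Illustrative reading of the two invocations above: the sub variants are
 * built from amoadd with a negated operand, and the *_return forms are
 * derived from the fetch forms by applying c_op/I to the fetched old value,
 * e.g. (roughly):
 *
 *	arch_atomic_sub_return(i, v)
 *		== arch_atomic_fetch_sub(i, v) + (-i)
 *		== (old value of v->counter) - i, i.e. the new value
 */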

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return

#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_sub_return		arch_atomic64_sub_return

#define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
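
/*
 * Minimal usage sketch (MY_FLAG_PENDING, state and handle_pending() are
 * hypothetical): the logical ops only come in fetch form, and the fetched
 * old value makes "test-and-clear"-style patterns straightforward:
 *
 *	#define MY_FLAG_PENDING	0x1
 *
 *	int old = arch_atomic_fetch_and(~MY_FLAG_PENDING, &state);
 *
 *	if (old & MY_FLAG_PENDING)
 *		handle_pending();	// runs only on the CPU that cleared it
 */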

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
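
/*
 * Minimal usage sketch (obj->refs is hypothetical): this is the building
 * block behind the generic atomic_add_unless()/atomic_inc_not_zero()
 * helpers, e.g. "take a reference unless the object is already dead":
 *
 *	if (arch_atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// refcount was 0, object is going away
 *
 * The LR/SC loop above only reaches the trailing "fence rw, rw" after a
 * successful store-conditional, so the full barrier is, as the comment
 * says, provided on success only; the early-exit path stays relaxed.
 */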

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif

static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w      %[p],  %[c]\n"
		"	bltz      %[p],  1f\n"
		"	addi      %[rc], %[p], 1\n"
		"	sc.w.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev < 0);
}

#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w      %[p],  %[c]\n"
		"	bgtz      %[p],  1f\n"
		"	addi      %[rc], %[p], -1\n"
		"	sc.w.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev > 0);
}

#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w      %[p],  %[c]\n"
		"	addi      %[rc], %[p], -1\n"
		"	bltz      %[rc], 1f\n"
		"	sc.w.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return prev - 1;
}

#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
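
/*
 * Minimal usage sketch (free_slots is hypothetical): dec_if_positive returns
 * the value the counter was decremented to, or a negative number if the
 * decrement did not happen, which suits semaphore-like "try to take one
 * slot" logic:
 *
 *	if (arch_atomic_dec_if_positive(&free_slots) < 0)
 *		return -EBUSY;	// no slot available, counter left untouched
 *
 * Note the return value is "prev - 1" even on the failure path, so callers
 * should test for < 0 rather than for any particular sentinel.
 */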

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d      %[p],  %[c]\n"
		"	bltz      %[p],  1f\n"
		"	addi      %[rc], %[p], 1\n"
		"	sc.d.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev < 0);
}

#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative

static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d      %[p],  %[c]\n"
		"	bgtz      %[p],  1f\n"
		"	addi      %[rc], %[p], -1\n"
		"	sc.d.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev > 0);
}

#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d      %[p],  %[c]\n"
		"	addi      %[rc], %[p], -1\n"
		"	bltz      %[rc], 1f\n"
		"	sc.d.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return prev - 1;
}

#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif

#endif /* _ASM_RISCV_ATOMIC_H */