xref: /openbmc/linux/arch/riscv/include/asm/atomic.h (revision ba61bb17)
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");	\
	op##_relaxed(args);						\
})
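
/*
 * Illustrative expansion (a sketch, not code generated here): the
 * generic atomic layer uses the two helpers above to build the
 * _acquire/_release variants out of the _relaxed ones, so
 *
 *	atomic_fetch_add_acquire(1, &v);
 *
 * becomes, roughly,
 *
 *	({
 *		int __ret = atomic_fetch_add_relaxed(1, &v);
 *		__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory");
 *		__ret;
 *	})
 *
 * where RISCV_ACQUIRE_BARRIER is a trailing fence defined elsewhere in
 * the arch headers.
 */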

static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
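
/*
 * Minimal usage sketch (illustrative only):
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 5);
 *	pr_info("count = %d\n", atomic_read(&example_count));
 *
 * These are tear-free loads and stores; they imply no memory ordering
 * beyond that of READ_ONCE()/WRITE_ONCE().
 */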

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_OP (op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_OP (op, asm_op, I, w,  int,   )				\
        ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
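
/*
 * For reference, an illustrative expansion: on RV64,
 * ATOMIC_OPS(add, add, i) above generates
 *
 *	void atomic_add(int i, atomic_t *v)		// amoadd.w zero, i, (v)
 *	void atomic64_add(long i, atomic64_t *v)	// amoadd.d zero, i, (v)
 *
 * The AMO writes its result to the zero register because the old value
 * is not needed; since these ops return nothing, the relaxed encoding
 * (no .aq/.rl bits) is sufficient.
 */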

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,			\
					      atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )		\
        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS
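
/*
 * Semantics reminder (illustrative values):
 *
 *	atomic_t v = ATOMIC_INIT(3);
 *
 *	atomic_fetch_add(2, &v);	// returns 3 (old value), v is now 5
 *	atomic_add_return(2, &v);	// returns 7 (new value), v is now 7
 *
 * The fetch_* forms return the value before the operation; the
 * *_return forms return the value after it.  Both are fully ordered
 * here (.aqrl); the _relaxed forms drop that ordering.
 */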

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )			\
        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * The extra atomic operations that are constructed from one of the core
 * AMO-based operations above (aside from sub, which is easier to fit above).
 * These are required to perform a full barrier, which they already get by
 * being built on atomic_*_return, since that is also required to perform a
 * full barrier.
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix)		\
static __always_inline							\
bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	return atomic##prefix##_##func_op##_return(i, v) comp_op I;	\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I)				\
        ATOMIC_OP(op, func_op, comp_op, I,  int,   )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I)				\
        ATOMIC_OP(op, func_op, comp_op, I,  int,   )			\
        ATOMIC_OP(op, func_op, comp_op, I, long, 64)
#endif

ATOMIC_OPS(add_and_test, add, ==, 0)
ATOMIC_OPS(sub_and_test, sub, ==, 0)
ATOMIC_OPS(add_negative, add,  <, 0)

#undef ATOMIC_OP
#undef ATOMIC_OPS
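
/*
 * Typical use, sketched with a hypothetical refcounted object:
 *
 *	struct obj { atomic_t refs; };
 *
 *	if (atomic_sub_and_test(1, &obj->refs))
 *		free_obj(obj);	// dropped the last reference
 *
 * The full barrier these inherit from atomic_*_return() is what makes
 * the free safe against earlier accesses to the object on other CPUs.
 */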

#define ATOMIC_OP(op, func_op, I, c_type, prefix)			\
static __always_inline							\
void atomic##prefix##_##op(atomic##prefix##_t *v)			\
{									\
	atomic##prefix##_##func_op(I, v);				\
}

#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)			\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##func_op##_relaxed(I, v);	\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)		\
{									\
	return atomic##prefix##_fetch_##func_op(I, v);			\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix)		\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op##_relaxed(v) c_op I;		\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v)		\
{									\
        return atomic##prefix##_fetch_##op(v) c_op I;			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_OP(       op, asm_op,       I,  int,   )			\
        ATOMIC_FETCH_OP( op, asm_op,       I,  int,   )			\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_OP(       op, asm_op,       I,  int,   )			\
        ATOMIC_FETCH_OP( op, asm_op,       I,  int,   )			\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )			\
        ATOMIC_OP(       op, asm_op,       I, long, 64)			\
        ATOMIC_FETCH_OP( op, asm_op,       I, long, 64)			\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +,  1)
ATOMIC_OPS(dec, add, +, -1)

#define atomic_inc_return_relaxed	atomic_inc_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return_relaxed
#define atomic_inc_return		atomic_inc_return
#define atomic_dec_return		atomic_dec_return

#define atomic_fetch_inc_relaxed	atomic_fetch_inc_relaxed
#define atomic_fetch_dec_relaxed	atomic_fetch_dec_relaxed
#define atomic_fetch_inc		atomic_fetch_inc
#define atomic_fetch_dec		atomic_fetch_dec

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_inc_return_relaxed	atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return_relaxed
#define atomic64_inc_return		atomic64_inc_return
#define atomic64_dec_return		atomic64_dec_return

#define atomic64_fetch_inc_relaxed	atomic64_fetch_inc_relaxed
#define atomic64_fetch_dec_relaxed	atomic64_fetch_dec_relaxed
#define atomic64_fetch_inc		atomic64_fetch_inc
#define atomic64_fetch_dec		atomic64_fetch_dec
#endif

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define ATOMIC_OP(op, func_op, comp_op, I, prefix)			\
static __always_inline							\
bool atomic##prefix##_##op(atomic##prefix##_t *v)			\
{									\
	return atomic##prefix##_##func_op##_return(v) comp_op I;	\
}

ATOMIC_OP(inc_and_test, inc, ==, 0,   )
ATOMIC_OP(dec_and_test, dec, ==, 0,   )
#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP

/* This is required to provide a full barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
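
/*
 * Usage sketch (illustrative only): __atomic_add_unless() returns the
 * value it observed, so callers compare against @u to learn whether
 * the add happened:
 *
 *	if (__atomic_add_unless(&v, 1, 0) != 0)
 *		;	// v was non-zero and has been incremented
 *
 * The LR/SC loop retries until the store-conditional succeeds, or
 * bails out to label 1 once the counter is found to equal @u.
 */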

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}

static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	return __atomic64_add_unless(v, a, u) != u;
}
#endif

/*
 * The extra atomic operations that are constructed from one of the core
 * LR/SC-based operations above.
 */
static __always_inline int atomic_inc_not_zero(atomic_t *v)
{
	return __atomic_add_unless(v, 1, 0);
}

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_add_unless(v, 1, 0);
}
#endif
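
/*
 * Usage sketch (illustrative, with a hypothetical refcounted object):
 * the classic "take a reference only if the object is still live"
 * pattern:
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		return NULL;	// already on its way to being freed
 */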

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics
 * as {cmp,}xchg and the operations that return values, so they need a full
 * barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)						\
	ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()

#undef ATOMIC_OPS
#undef ATOMIC_OP
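
/*
 * Usage sketch (illustrative only): a lock-free read-modify-write
 * built on the cmpxchg generated above, here doubling a counter:
 *
 *	int old = atomic_read(&v);
 *	int cur;
 *
 *	while ((cur = atomic_cmpxchg(&v, old, old * 2)) != old)
 *		old = cur;
 *
 * atomic_cmpxchg() returns the value it observed in v; the store
 * happened only if that value equals the expected old value.
 */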

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
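
/*
 * Usage sketch (illustrative, with a hypothetical semaphore-style
 * counter): atomic_dec_if_positive() returns the would-be new value,
 * which is negative exactly when the decrement was refused:
 *
 *	if (atomic_dec_if_positive(&sem_count) >= 0)
 *		;	// got a slot: counter was > 0 and is now one less
 *
 * The store is skipped (branch to 1:) whenever it would take the
 * counter below zero, so the counter itself never goes negative.
 */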

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */