/* SPDX-License-Identifier: GPL-2.0-only */
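/*
 * Atomic ops for ARC cores with hardware LLSC support (CONFIG_ARC_HAS_LLSC):
 * every RMW helper below is an LLOCK/SCOND (load-locked/store-conditional)
 * retry loop on v->counter.  arch_atomic_set() can stay a plain WRITE_ONCE()
 * because a word store is naturally atomic; a racing LLOCK/SCOND sequence on
 * another CPU simply fails its SCOND and retries.
 */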

#ifndef _ASM_ARC_ATOMIC_LLSC_H
#define _ASM_ARC_ATOMIC_LLSC_H

#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

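/*
 * ATOMIC_OP() generates void arch_atomic_<op>(): LLOCK the counter into a
 * register, apply the ALU instruction named by asm_op, then SCOND the
 * result back.  SCOND reports success in the Z flag (hence the "cc"
 * clobber), so a failed store-conditional takes the BNZ back to the LLOCK
 * and the sequence retries.
 */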
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

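/*
 * ATOMIC_OP_RETURN() is the same loop but hands back the updated counter
 * value.  Only the _relaxed form is implemented here; the generic atomic
 * headers derive the acquire/release/fully-ordered variants from it by
 * adding the appropriate barriers.
 */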
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	return val;							\
}

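/*
 * Defining the names to themselves tells <linux/atomic.h> that the arch
 * supplies these relaxed variants, so only the ordered wrappers are
 * generated as fallbacks.
 */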
#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed

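/*
 * ATOMIC_FETCH_OP() returns the value the counter held *before* the
 * operation: the LLOCKed value is kept in 'orig' while the new value is
 * computed into a separate register for the SCOND.
 */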
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned int val, orig;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	return orig;							\
}

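/* As above: advertise the relaxed fetch_* variants to the generic layer. */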
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

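/*
 * Stamp out the families: add/sub get all three shapes (void, return,
 * fetch), while the bitwise ops below omit the *_return form, which the
 * kernel's atomic API does not define for them.
 */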
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
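/*
 * For example, ATOMIC_OPS(add, +=, add) emits arch_atomic_add(),
 * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed(),
 * each wrapping an "llock; add; scond; bnz" loop.  The c_op argument (+=)
 * is unused by this LLSC flavour; it is kept so the macro signature
 * mirrors the non-LLSC (spinlock-based) implementation, which does use it.
 */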

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

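/*
 * andnot maps to a native instruction (BIC), so claim it here and keep the
 * generic code from synthesizing it as arch_atomic_and(~i, v).
 */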
#define arch_atomic_andnot		arch_atomic_andnot
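
/*
 * Caller-side usage sketch (through the generic <linux/atomic.h> wrappers
 * layered on top of these arch_ helpers):
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &cnt);			arch_atomic_add() loop
 *	new = atomic_add_return(1, &cnt);	relaxed loop + ordering fences
 *	old = atomic_fetch_andnot(0x4, &cnt);	BIC-based fetch loop
 */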

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif