xref: /openbmc/linux/arch/arc/include/asm/atomic.h (revision 86bee12f)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

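/*
 * For illustration only (not an additional definition): with LLSC,
 * ATOMIC_OP(add, +=, add) above generates roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"	// load-locked current counter
 *		"	add     %[val], %[val], %[i]	\n"	// apply the operation
 *		"	scond   %[val], [%[ctr]]	\n"	// store-conditional the result
 *		"	bnz     1b			\n"	// reservation lost: retry
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 *
 * i.e. an LLOCK/SCOND retry loop with #asm_op pasted in as the ALU insn.
 */
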
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}
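
/*
 * Usage sketch (illustrative only; 'cnt' is just a stand-in counter):
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *	int new = atomic_add_return(1, &cnt);	// new == old + 1
 *
 * Unlike the void atomic_##op() helpers above, the *_return() variants act
 * as full memory barriers by virtue of the smp_mb() pair around the LLSC loop.
 */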

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), still requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
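
/*
 * Illustration of the hazard the lock above prevents (hypothetical
 * interleaving, not code): with a bare WRITE_ONCE() based atomic_set(),
 * a concurrent emulated RMW could silently overwrite the new value:
 *
 *	CPU A: atomic_add() (emulated)		CPU B: atomic_set()
 *	atomic_ops_lock(flags)
 *	tmp = v->counter
 *						v->counter = i	// unlocked store
 *	v->counter = tmp + delta		// i is lost
 *	atomic_ops_unlock(flags)
 */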

#endif

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}
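
/*
 * For illustration only: without LLSC, ATOMIC_OP(add, +=, add) expands to an
 * ordinary critical section, with c_op substituted textually:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		atomic_ops_lock(flags);		// irq-off (UP) or spinlock (SMP)
 *		v->counter += i;		// "+=" comes from the c_op argument
 *		atomic_ops_unlock(flags);
 *	}
 */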

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
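
/*
 * Net effect of the instantiations above: atomic_add(), atomic_add_return(),
 * atomic_sub(), atomic_sub_return(), plus the non-returning bitwise helpers
 * atomic_and(), atomic_andnot() (BIC insn in the LLSC flavour), atomic_or()
 * and atomic_xor().
 */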

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#else /* CONFIG_ARC_PLAT_EZNPS */

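/*
 * EZchip NPS platform: plain reads/writes use the cache-bypassing .di
 * flavour of LD/ST, and the RMW ops below hand the operand and counter
 * address to a dedicated atomic instruction (emitted via .word) in r2/r3.
 * (Descriptive note only, inferred from the code that follows.)
 */
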
static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}
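
/*
 * Note on the *_return() flavour above (inferred from the sequence): the
 * custom insn is handed the operand in r2 and the counter address in r3,
 * and leaves the *previous* counter value back in r2 -- which is why
 * "temp c_op i" is applied in C afterwards to form the new value that the
 * API must return.
 */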

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
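
/*
 * Typical use (illustrative only; 'foo' and its 'refcnt' field are made up):
 * take a reference only if the object is still live, i.e. its refcount has
 * not already dropped to zero:
 *
 *	if (!atomic_inc_not_zero(&foo->refcnt))
 *		return NULL;		// object is being torn down
 *
 * atomic_add_unless()/__atomic_add_unless() generalise this to any (a, u).
 */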

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */