xref: /openbmc/linux/arch/mips/include/asm/atomic.h (revision 9fb29c73)
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

/*
 * Using a branch-likely instruction to check the result of an sc instruction
 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 * cause ll-sc sequences to execute non-atomically.
 */
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif
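
/*
 * Purely illustrative sketch (not from the original header): with the
 * R10000 workaround enabled, an ll/sc retry loop generated by the macros
 * below ends in a branch-likely instruction, e.g. for atomic_add(1, &v)
 * roughly:
 *
 *	1:	ll	t0, 0(a1)	load-linked v->counter
 *		addu	t0, 1
 *		sc	t0, 0(a1)	store-conditional; t0 becomes 0 on failure
 *		beqzl	t0, 1b		branch-likely retry (plain beqz otherwise)
 *
 * The register names and offsets here are made up for the example; the real
 * macros let the compiler choose operands via the asm constraints.
 */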

#define ATOMIC_INIT(i)	  { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
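
/*
 * Illustrative usage sketch (not part of the original header), assuming a
 * hypothetical counter owned by the caller. atomic_read()/atomic_set() are
 * tear-free accesses but imply no memory barriers.
 *
 *	static atomic_t example_users = ATOMIC_INIT(0);
 *
 *	void example_reset(void)
 *	{
 *		atomic_set(&example_users, 0);
 *		pr_debug("users now %d\n", atomic_read(&example_users));
 *	}
 */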

#define ATOMIC_OP(op, c_op, asm_op)					      \
static __inline__ void atomic_##op(int i, atomic_t * v)			      \
{									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%0, %1		# atomic_" #op "	\n"   \
		"	" #asm_op " %0, %2				\n"   \
		"	sc	%0, %1					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)	      \
{									      \
	int result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%1, %2		# atomic_" #op "_return	\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	sc	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		result c_op i;						      \
		v->counter = result;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	      \
{									      \
	int result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	sc	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		"	move	%0, %1					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC_OPS(op, c_op, asm_op)					      \
	ATOMIC_OP(op, c_op, asm_op)					      \
	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
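
/*
 * Illustrative sketch (not part of the original header): the fully-ordered
 * atomic_add_return()/atomic_sub_return() that callers normally use are
 * generated from the _relaxed variants above by <linux/atomic.h>.
 * Hypothetical example of a pending-work counter:
 *
 *	static atomic_t example_pending = ATOMIC_INIT(0);
 *
 *	void example_submit(void)
 *	{
 *		atomic_add(1, &example_pending);
 *	}
 *
 *	bool example_complete_last(void)
 *	{
 *		return atomic_sub_return(1, &example_pending) == 0;
 *	}
 */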

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					      \
	ATOMIC_OP(op, c_op, asm_op)					      \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
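
/*
 * Illustrative sketch (not part of the original header): the fetch variants
 * return the value the counter held before the operation, which is handy
 * for flag words. Hypothetical example using the fully-ordered wrapper that
 * <linux/atomic.h> builds from atomic_fetch_or_relaxed():
 *
 *	#define EXAMPLE_FLAG_BUSY	0x1
 *
 *	static atomic_t example_flags = ATOMIC_INIT(0);
 *
 *	bool example_try_claim(void)
 *	{
 *		return !(atomic_fetch_or(EXAMPLE_FLAG_BUSY, &example_flags) &
 *			 EXAMPLE_FLAG_BUSY);
 *	}
 */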

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	.set	pop					\n"
		"	subu	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 1f					\n"
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	sc	%1, %2					\n"
		"\t" __scbeqz "	%1, 1b					\n"
		"1:							\n"
		"	.set	pop					\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
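
/*
 * Illustrative sketch (not part of the original header): atomic_cmpxchg()
 * returns the value previously held by the counter, so a successful update
 * is detected by comparing it with the expected old value, while
 * atomic_xchg() unconditionally installs the new value and returns the old
 * one. Hypothetical example of a 0 -> 1 "start once" transition:
 *
 *	static atomic_t example_state = ATOMIC_INIT(0);
 *
 *	bool example_start(void)
 *	{
 *		return atomic_cmpxchg(&example_state, 0, 1) == 0;
 *	}
 *
 *	int example_stop(void)
 *	{
 *		return atomic_xchg(&example_state, 0);
 *	}
 */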

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
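
/*
 * Illustrative sketch (not part of the original header):
 * atomic_dec_if_positive() returns the old value minus one and only stores
 * the result when it is non-negative, which suits refcount-style "drop
 * unless already zero" logic. Hypothetical example:
 *
 *	static atomic_t example_refs = ATOMIC_INIT(1);
 *
 *	bool example_put_last(void)
 *	{
 *		return atomic_dec_if_positive(&example_refs) == 0;
 *	}
 */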

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 */
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
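
/*
 * Illustrative sketch (not part of the original header): the 64-bit
 * accessors mirror the 32-bit ones but operate on atomic64_t/long.
 * Hypothetical example:
 *
 *	static atomic64_t example_bytes = ATOMIC64_INIT(0);
 *
 *	void example_report(void)
 *	{
 *		pr_debug("bytes so far: %ld\n", atomic64_read(&example_bytes));
 *	}
 */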

#define ATOMIC64_OP(op, c_op, asm_op)					      \
static __inline__ void atomic64_##op(long i, atomic64_t * v)		      \
{									      \
	if (kernel_uses_llsc) {						      \
		long temp;						      \
									      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%0, %1		# atomic64_" #op "	\n"   \
		"	" #asm_op " %0, %2				\n"   \
		"	scd	%0, %1					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{									      \
	long result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		long temp;						      \
									      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%1, %2		# atomic64_" #op "_return\n"  \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	scd	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		result c_op i;						      \
		v->counter = result;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
{									      \
	long result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		long temp;						      \
									      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	scd	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	move	%0, %1					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC64_OPS(op, c_op, asm_op)					      \
	ATOMIC64_OP(op, c_op, asm_op)					      \
	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
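
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * 64-bit statistics counter. atomic64_add() provides no ordering; the
 * fully-ordered atomic64_add_return() that callers use is generated from
 * the _relaxed variant above by <linux/atomic.h>.
 *
 *	static atomic64_t example_tx_bytes = ATOMIC64_INIT(0);
 *
 *	void example_account_tx(long len)
 *	{
 *		atomic64_add(len, &example_tx_bytes);
 *	}
 */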

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op, asm_op)					      \
	ATOMIC64_OP(op, c_op, asm_op)					      \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(and, &=, and)
ATOMIC64_OPS(or, |=, or)
ATOMIC64_OPS(xor, ^=, xor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic
 *                            variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 1f					\n"
		"	scd	%1, %2					\n"
		"\t" __scbeqz "	%1, 1b					\n"
		"1:							\n"
		"	.set	pop					\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
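
/*
 * Illustrative sketch (not part of the original header): the 64-bit
 * conditional helpers behave like their 32-bit counterparts. Hypothetical
 * example of a token count that never goes negative:
 *
 *	static atomic64_t example_tokens = ATOMIC64_INIT(16);
 *
 *	bool example_take_token(void)
 *	{
 *		return atomic64_dec_if_positive(&example_tokens) >= 0;
 *	}
 */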

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */