/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

/*
 * Using a branch-likely instruction to check the result of an sc instruction
 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 * cause ll-sc sequences to execute non-atomically.
 */
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif
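
/*
 * Illustrative sketch (not part of the original header): in the ll/sc
 * loops below, __scbeqz simply selects which branch instruction replays
 * the sequence when the store-conditional fails.  With the workaround
 * disabled, atomic_add() assembles to roughly the following (register
 * names are compiler-chosen and shown only for illustration):
 *
 *	1:	ll	t0, 0(a1)	# t0 = v->counter
 *		addu	t0, a0		# t0 += i
 *		sc	t0, 0(a1)	# try to store, t0 = success flag
 *		beqz	t0, 1b		# retry if the sc failed
 */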

#define ATOMIC_INIT(i)	  { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
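
/*
 * Illustrative usage (a sketch, not part of the original header; the
 * identifiers are made up):
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	void reset_events(void)
 *	{
 *		atomic_set(&nr_events, 0);
 *	}
 *
 *	int read_events(void)
 *	{
 *		return atomic_read(&nr_events);
 *	}
 *
 * atomic_read() and atomic_set() are single tear-free loads/stores of
 * v->counter; they are not RMW operations and imply no memory barriers.
 */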

#define ATOMIC_OP(op, c_op, asm_op)					      \
static __inline__ void atomic_##op(int i, atomic_t * v)		      \
{									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%0, %1		# atomic_" #op "	\n"   \
		"	" #asm_op " %0, %2				\n"   \
		"	sc	%0, %1					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
		: "Ir" (i) : __LLSC_CLOBBER);				      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
}
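
/*
 * For orientation (a sketch, not part of the original header):
 * ATOMIC_OP(add, +=, addu) generates a function that behaves like
 *
 *	void atomic_add(int i, atomic_t *v)
 *	{
 *		v->counter += i;	// performed as one atomic RMW step
 *	}
 *
 * either via the ll/sc retry loop or, on CPUs that lack ll/sc, by
 * briefly disabling interrupts.  Nothing is returned and no ordering
 * is implied.
 */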

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)      \
{									      \
	int result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%1, %2		# atomic_" #op "_return	\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	sc	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i) : __LLSC_CLOBBER);				      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		result c_op i;						      \
		v->counter = result;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}
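
/*
 * Note on the loop above: the "#asm_op" appears twice on purpose.  The
 * first instance computes the new value into %0, which the sc then
 * consumes and overwrites with its success flag; once the sc has
 * succeeded, the operation is redone from the still-live old value in
 * %1 so that %0 holds the new counter value to be returned.  The
 * _relaxed suffix means no memory ordering is implied here.
 */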

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	      \
{									      \
	int result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	sc	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		"	move	%0, %1					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i) : __LLSC_CLOBBER);				      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}
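
/*
 * Illustrative usage (a sketch, not part of the original header): the
 * fetch variants return the value the counter held *before* the
 * operation was applied:
 *
 *	atomic_t v = ATOMIC_INIT(3);
 *	int old = atomic_fetch_add_relaxed(2, &v);
 *	// old == 3, atomic_read(&v) == 5
 *
 * whereas atomic_add_return_relaxed(2, &v) would have returned 5.
 */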

#define ATOMIC_OPS(op, c_op, asm_op)					      \
	ATOMIC_OP(op, c_op, asm_op)					      \
	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
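
/*
 * The self-referencing #defines above advertise to <linux/atomic.h>
 * that this architecture supplies only the _relaxed forms; the generic
 * layer then derives the acquire/release and fully ordered variants by
 * wrapping them in barriers, roughly like the following sketch (helper
 * names simplified, not an exact copy of the generic code):
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		int ret;
 *
 *		smp_mb__before_atomic();
 *		ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		return ret;
 *	}
 */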

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					      \
	ATOMIC_OP(op, c_op, asm_op)					      \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
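
/*
 * Illustrative usage (a sketch; MY_FLAG and flags are made-up names):
 * the bitwise ops suit atomic flag words, e.g. setting a bit and
 * learning whether it was already set:
 *
 *	#define MY_FLAG		0x1
 *	static atomic_t flags = ATOMIC_INIT(0);
 *
 *	if (atomic_fetch_or_relaxed(MY_FLAG, &flags) & MY_FLAG)
 *		;	// the bit was already set
 *
 * Note that ATOMIC_OPS was redefined above without ATOMIC_OP_RETURN, so
 * and/or/xor come only in the void and fetch flavours.
 */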

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		int temp;

		loongson_llsc_mb();
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	.set	pop					\n"
		"	subu	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	sc	%1, %2					\n"
		"\t" __scbeqz "	%1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i) : __LLSC_CLOBBER);
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
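
/*
 * Illustrative usage (a sketch, not part of the original header):
 * atomic_cmpxchg() returns the value previously found in @v and only
 * stores the new value when that matched the expected one, which is the
 * usual building block for open-coded lock-free updates:
 *
 *	static atomic_t refs = ATOMIC_INIT(1);	// made-up example counter
 *	int old = atomic_read(&refs);
 *
 *	while (old > 0) {
 *		int seen = atomic_cmpxchg(&refs, old, old + 1);
 *		if (seen == old)
 *			break;		// increment taken
 *		old = seen;		// lost a race, retry with new value
 *	}
 *
 * Both helpers map onto cmpxchg()/xchg() and are fully ordered when the
 * exchange actually takes place.
 */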

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
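
/*
 * Illustrative usage (a sketch; "credits" is a made-up variable): a
 * counter that must never drop below zero.  The macro returns the old
 * value minus one and only stores it when that result is >= 0:
 *
 *	if (atomic_dec_if_positive(&credits) < 0)
 *		return -EBUSY;	// no credit was available, nothing changed
 *
 * Like atomic_sub_if_positive() it acts as a full barrier, courtesy of
 * smp_mb__before_llsc()/smp_llsc_mb() around the ll/sc loop.
 */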

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
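
/*
 * As with the 32-bit accessors, these are single tear-free loads and
 * stores of the 64-bit counter and imply no memory barriers, e.g.
 * (a sketch; the identifier is made up):
 *
 *	static atomic64_t bytes_rx = ATOMIC64_INIT(0);
 *
 *	s64 snapshot = atomic64_read(&bytes_rx);
 */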

#define ATOMIC64_OP(op, c_op, asm_op)					      \
static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		      \
{									      \
	if (kernel_uses_llsc) {						      \
		s64 temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%0, %1		# atomic64_" #op "	\n"   \
		"	" #asm_op " %0, %2				\n"   \
		"	scd	%0, %1					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
		: "Ir" (i) : __LLSC_CLOBBER);				      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)  \
{									      \
	s64 result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		s64 temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%1, %2		# atomic64_" #op "_return\n"  \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	scd	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i) : __LLSC_CLOBBER);				      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		result c_op i;						      \
		v->counter = result;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)   \
{									      \
	s64 result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		s64 temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	scd	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	move	%0, %1					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i) : __LLSC_CLOBBER);				      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC64_OPS(op, c_op, asm_op)					      \
	ATOMIC64_OP(op, c_op, asm_op)					      \
	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op, asm_op)					      \
	ATOMIC64_OP(op, c_op, asm_op)					      \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(and, &=, and)
ATOMIC64_OPS(or, |=, or)
ATOMIC64_OPS(xor, ^=, xor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
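
/*
 * The 64-bit generators mirror their 32-bit counterparts, substituting
 * lld/scd and daddu/dsubu and operating on s64 values, e.g. (a sketch;
 * the identifiers are made up):
 *
 *	static atomic64_t bytes_tx = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_tx);
 *	s64 old = atomic64_fetch_add_relaxed(len, &bytes_tx);
 */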

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic
 *                            variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
{
	s64 result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		s64 temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 1f					\n"
		"	scd	%1, %2					\n"
		"\t" __scbeqz "	%1, 1b					\n"
		"1:							\n"
		"	.set	pop					\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}
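
/*
 * Illustrative usage (a sketch; "quota" and "nbytes" are made-up
 * names): like its 32-bit counterpart this is a fully ordered
 * operation, e.g. for draining a 64-bit budget:
 *
 *	if (atomic64_sub_if_positive(nbytes, &quota) < 0)
 *		return -ENOSPC;	// over budget, nothing was subtracted
 */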

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */