/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

/*
 * Using a branch-likely instruction to check the result of an sc instruction
 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 * cause ll-sc sequences to execute non-atomically.
 */
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif
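
/*
 * __scbeqz is spliced into each LL/SC loop below as the branch that
 * retries the sequence when the sc (store-conditional) fails and
 * writes 0 to its destination register, e.g.:
 *
 *	"	sc	%0, %1					\n"
 *	"\t" __scbeqz "	%0, 1b					\n"
 *
 * On affected R10000 parts this assembles to the branch-likely "beqzl";
 * everywhere else it is a plain "beqz".
 */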

#define ATOMIC_INIT(i)	  { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
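
/*
 * Example usage (a minimal sketch, not part of this header): plain
 * loads and stores of an atomic_t go through these accessors rather
 * than direct assignment, so the compiler can neither tear nor fuse
 * them.
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	int snapshot = atomic_read(&refs);
 *	atomic_set(&refs, 0);
 */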

#define ATOMIC_OP(op, c_op, asm_op)					      \
static __inline__ void atomic_##op(int i, atomic_t * v)			      \
{									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		/* barrier required on some Loongson-3 CPUs before ll/sc */   \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%0, %1		# atomic_" #op "	\n"   \
		"	" #asm_op " %0, %2				\n"   \
		"	sc	%0, %1					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		/* no LL/SC: fall back to disabling interrupts */	      \
		raw_local_irq_save(flags);				      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
}
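
/*
 * For illustration only (a sketch, not generated code): on an LL/SC
 * kernel, ATOMIC_OP(add, +=, addu) yields an atomic_add() that behaves
 * roughly like
 *
 *	void atomic_add(int i, atomic_t *v)
 *	{
 *		int temp;
 *
 *		do {
 *			temp = load_linked(&v->counter);	 // ll
 *			temp += i;				 // addu
 *		} while (!store_conditional(&v->counter, temp)); // sc
 *	}
 *
 * where load_linked() and store_conditional() are hypothetical helpers
 * standing in for the ll/sc instruction pair.
 */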

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)      \
{									      \
	int result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%1, %2		# atomic_" #op "_return	\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	sc	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		/* sc clobbered %0 with its success flag; redo the op */      \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		result c_op i;						      \
		v->counter = result;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	      \
{									      \
	int result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		int temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	sc	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		/* return the value ll loaded, i.e. the old value */	      \
		"	move	%0, %1					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}
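
/*
 * The *_return variants above hand back the new value of the counter;
 * the fetch_* variants hand back the value it held beforehand. A
 * hedged sketch of a typical caller:
 *
 *	int old = atomic_fetch_add_relaxed(1, &v);
 *	// old is the pre-increment value; the counter now holds old + 1
 */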

#define ATOMIC_OPS(op, c_op, asm_op)					      \
	ATOMIC_OP(op, c_op, asm_op)					      \
	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
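
/*
 * The two instantiations above expand to the whole arithmetic family:
 * atomic_add(), atomic_sub(), atomic_add_return_relaxed(),
 * atomic_sub_return_relaxed(), atomic_fetch_add_relaxed() and
 * atomic_fetch_sub_relaxed().
 */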

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					      \
	ATOMIC_OP(op, c_op, asm_op)					      \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
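
/*
 * Note that the bitwise family omits ATOMIC_OP_RETURN: the generic
 * atomic API defines no atomic_and_return() and friends, only the
 * void and fetch_* forms generated here.
 */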

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	.set	pop					\n"
		"	subu	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		/* if the result went negative, skip the store entirely */
		"	bltz	%0, 1f					\n"
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	sc	%1, %2					\n"
		"\t" __scbeqz "	%1, 1b					\n"
		"1:							\n"
		"	.set	pop					\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
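
/*
 * Example (a sketch of the usual compare-and-swap retry idiom, where
 * FLAG is a hypothetical bit mask):
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		new = old | FLAG;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */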

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
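
/*
 * Example (hedged sketch): atomic_dec_if_positive() suits counting-
 * semaphore style resource pools, with "slots" a hypothetical counter
 * of free entries:
 *
 *	if (atomic_dec_if_positive(&slots) < 0)
 *		return -EBUSY;	// pool empty; the counter was untouched
 */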

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
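
/*
 * The atomic64_* API below mirrors the 32-bit one, operating on long
 * counters via lld/scd. A minimal sketch:
 *
 *	static atomic64_t bytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(4096, &bytes);
 */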

#define ATOMIC64_OP(op, c_op, asm_op)					      \
static __inline__ void atomic64_##op(long i, atomic64_t * v)		      \
{									      \
	if (kernel_uses_llsc) {						      \
		long temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%0, %1		# atomic64_" #op "	\n"   \
		"	" #asm_op " %0, %2				\n"   \
		"	scd	%0, %1					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{									      \
	long result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		long temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%1, %2		# atomic64_" #op "_return\n"  \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	scd	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		result c_op i;						      \
		v->counter = result;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
{									      \
	long result;							      \
									      \
	if (kernel_uses_llsc) {						      \
		long temp;						      \
									      \
		loongson_llsc_mb();					      \
		__asm__ __volatile__(					      \
		"	.set	push					\n"   \
		"	.set	"MIPS_ISA_LEVEL"			\n"   \
		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
		"	" #asm_op " %0, %1, %3				\n"   \
		"	scd	%0, %2					\n"   \
		"\t" __scbeqz "	%0, 1b					\n"   \
		"	move	%0, %1					\n"   \
		"	.set	pop					\n"   \
		: "=&r" (result), "=&r" (temp),				      \
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
		: "Ir" (i));						      \
	} else {							      \
		unsigned long flags;					      \
									      \
		raw_local_irq_save(flags);				      \
		result = v->counter;					      \
		v->counter c_op i;					      \
		raw_local_irq_restore(flags);				      \
	}								      \
									      \
	return result;							      \
}

#define ATOMIC64_OPS(op, c_op, asm_op)					      \
	ATOMIC64_OP(op, c_op, asm_op)					      \
	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op, asm_op)					      \
	ATOMIC64_OP(op, c_op, asm_op)					      \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(and, &=, and)
ATOMIC64_OPS(or, |=, or)
ATOMIC64_OPS(xor, ^=, xor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

379 
380 /*
381  * atomic64_sub_if_positive - conditionally subtract integer from atomic
382  *                            variable
383  * @i: integer value to subtract
384  * @v: pointer of type atomic64_t
385  *
386  * Atomically test @v and subtract @i if @v is greater or equal than @i.
387  * The function returns the old value of @v minus @i.
388  */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 1f					\n"
		"	scd	%1, %2					\n"
		"\t" __scbeqz "	%1, 1b					\n"
		"1:							\n"
		"	.set	pop					\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */