#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

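/*
 * For reference, a rough sketch of what ATOMIC_OP(add, addl) expands to
 * (illustrative only; the exact text is whatever the preprocessor produces):
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long temp;
 *		__asm__ __volatile__(
 *		"1:	ldl_l %0,%1\n"		// load-locked v->counter into temp
 *		"	addl %0,%2,%0\n"	// temp += i
 *		"	stl_c %0,%1\n"		// store-conditional; temp = 0 on failure
 *		"	beq %0,2f\n"		// lost the reservation: retry out of line
 *		".subsection 2\n"
 *		"2:	br 1b\n"
 *		".previous"
 *		:"=&r" (temp), "=m" (v->counter)
 *		:"Ir" (i), "m" (v->counter));
 *	}
 */
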
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
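
/*
 * Note: the *_return variants compute the post-operation value twice,
 * once into "result" (first asm_op) and once into "temp" for the
 * store-conditional (second asm_op), so the value returned is the new
 * value of the counter.  The smp_mb() on either side of the ll/sc
 * sequence gives these fully ordered semantics.
 */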
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)

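/*
 * Each ATOMIC_OPS(op) invocation below therefore generates four
 * functions: atomic_##op(), atomic_##op##_return(), atomic64_##op()
 * and atomic64_##op##_return(), using the "l" (longword) and "q"
 * (quadword) forms of the underlying instruction.
 */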
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

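/*
 * The bitwise ops map onto the Alpha logical instructions: "bic"
 * (bit clear, i.e. and-not) and "bis" (bit set, i.e. or).  Only the
 * non-return forms are generated here.
 */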
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, bis)
ATOMIC_OP(xor, xor)
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, bis)
ATOMIC64_OP(xor, xor)

#undef ATOMIC_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

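/*
 * Example (illustrative sketch, not used by this file): a typical
 * compare-and-swap retry loop built on atomic_cmpxchg().  The hypothetical
 * helper below atomically clamps a counter to "max":
 *
 *	static inline void atomic_clamp_example(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *
 *		for (;;) {
 *			int new = old > max ? max : old;
 *			int prev = atomic_cmpxchg(v, old, new);
 *
 *			if (prev == old)
 *				break;
 *			old = prev;	// someone else changed it; retry
 *		}
 *	}
 */
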
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
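
/*
 * Note: if the loaded value equals @u, the cmpeq/bne pair above skips the
 * store entirely, so @v is left untouched and the (unchanged) old value is
 * returned.  The generic atomic_add_unless()/atomic_inc_not_zero() wrappers
 * in <linux/atomic.h> then compare that return value against @u.
 */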


/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}


/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Returns the old value of @v minus 1, even if @v was not
 * decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
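
/*
 * Example (illustrative sketch): because the old value minus one is
 * returned whether or not the decrement happened, callers test the sign
 * of the result.  A hypothetical "put" that must not go below zero:
 *
 *	long left = atomic64_dec_if_positive(&count);
 *
 *	if (left < 0)
 *		;	// count was already zero; nothing was decremented
 *	else if (left == 0)
 *		;	// we performed the final decrement
 */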

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */