/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is locking interrupts to level 1 (LOCKLEVEL).
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, LOCKLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because register allocation is done
 * by the compiler and is not under our control, so a window
 * overflow could otherwise occur between the rsil and wsr
 * instructions.  By using a15 in the rsil, the machine is
 * guaranteed to be in a state where no register reference will
 * cause an overflow.
 */
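
/*
 * Illustrative note (not from the original source): __stringify()
 * turns a macro's expansion into a string literal that is pasted
 * into the asm template.  Assuming LOCKLEVEL expands to 1, the
 * fragment
 *
 *	"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
 *
 * is concatenated into the single string
 *
 *	"rsil    a15, 1\n\t"
 *
 * before it ever reaches the assembler.
 */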

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))
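
/*
 * Usage sketch (illustrative only; nr_users and do_something() are
 * made up):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		do_something();
 *
 * Plain 32-bit loads and stores are naturally atomic here, so
 * atomic_read() and atomic_set() need no locking.
 */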

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"	/* raise IRQ level, old PS -> a15 */
		"l32i    %0, %2, 0              \n\t"		/* vval = v->counter */
		"add     %0, %0, %1             \n\t"		/* vval += i */
		"s32i    %0, %2, 0              \n\t"		/* v->counter = vval */
		"wsr     a15, "__stringify(PS)"       \n\t"	/* restore saved PS */
		"rsync                          \n"		/* wait for the PS write to complete */
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"sub     %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);
}
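
/*
 * Usage sketch for atomic_add()/atomic_sub() (illustrative only;
 * pages_in_flight and nr_pages are made up):
 *
 *	static atomic_t pages_in_flight = ATOMIC_INIT(0);
 *
 *	atomic_add(nr_pages, &pages_in_flight);
 *	...
 *	atomic_sub(nr_pages, &pages_in_flight);
 *
 * Neither call returns the new value; use the *_return variants
 * below when the updated count is needed.
 */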

/*
 * We use atomic_{add|sub}_return to define other functions.
 */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"
		"add     %0, %0, %1            \n\t"
		"s32i    %0, %2, 0             \n\t"
		"wsr     a15, "__stringify(PS)"      \n\t"
		"rsync                         \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);

	return vval;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"
		"sub     %0, %0, %1            \n\t"
		"s32i    %0, %2, 0             \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                         \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);

	return vval;
}
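
/*
 * Usage sketch (illustrative only): the *_return variants perform
 * the update and hand back the new value in one atomic step.  Here
 * budget, batch and refill_budget() are made up:
 *
 *	int remaining = atomic_sub_return(batch, &budget);
 *
 *	if (remaining <= 0)
 *		refill_budget();
 */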

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
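
/*
 * Usage sketch (illustrative only; obj and free_obj() are made up):
 * drop a reference and clean up only if it was the last one:
 *
 *	if (atomic_sub_and_test(1, &obj->refs))
 *		free_obj(obj);
 */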

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
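
/*
 * Usage sketch (illustrative only): the classic reference-count
 * release pattern, assuming a hypothetical struct thing with an
 * atomic_t refcount:
 *
 *	void put_thing(struct thing *t)
 *	{
 *		if (atomic_dec_and_test(&t->refcount))
 *			kfree(t);
 *	}
 */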

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
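
/*
 * Usage sketch (illustrative only): a compare-and-swap retry loop
 * that doubles the counter atomically; v is a made-up atomic_t:
 *
 *	static atomic_t v = ATOMIC_INIT(1);
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */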

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
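
/*
 * Usage sketch (illustrative only; obj is made up): grab a reference
 * during a lockless lookup, but refuse if the count already dropped
 * to zero, i.e. the object is being torn down:
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;
 */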

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned int all_f = -1;
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"	/* vval = v->counter */
		"xor     %1, %4, %3            \n\t"	/* %1 = mask ^ ~0 = ~mask */
		"and     %0, %0, %4            \n\t"	/* vval &= ~mask; %4 shares %1's register */
		"s32i    %0, %2, 0             \n\t"	/* v->counter = vval */
		"wsr     a15, "__stringify(PS)"      \n\t"
		"rsync                         \n"
		: "=&a" (vval), "=a" (mask)
		: "a" (v), "a" (all_f), "1" (mask)
		: "a15", "memory"
		);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"
		"or      %0, %0, %1            \n\t"
		"s32i    %0, %2, 0             \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                         \n"
		: "=&a" (vval)
		: "a" (mask), "a" (v)
		: "a15", "memory"
		);
}
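
/*
 * Usage sketch (illustrative only; the flag values and chip_flags
 * are made up):
 *
 *	#define IRQ_PENDING	0x01
 *	#define IRQ_MASKED	0x02
 *
 *	static atomic_t chip_flags = ATOMIC_INIT(0);
 *
 *	atomic_set_mask(IRQ_PENDING, &chip_flags);
 *	atomic_clear_mask(IRQ_MASKED, &chip_flags);
 */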

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */