xref: /openbmc/linux/arch/xtensa/include/asm/atomic.h (revision fb574682)
1 /*
2  * include/asm-xtensa/atomic.h
3  *
4  * Atomic operations that C can't guarantee us.  Useful for resource counting..
5  *
6  * This file is subject to the terms and conditions of the GNU General Public
7  * License.  See the file "COPYING" in the main directory of this archive
8  * for more details.
9  *
10  * Copyright (C) 2001 - 2008 Tensilica Inc.
11  */
12 
13 #ifndef _XTENSA_ATOMIC_H
14 #define _XTENSA_ATOMIC_H
15 
16 #include <linux/stringify.h>
17 #include <linux/types.h>
18 #include <asm/processor.h>
19 #include <asm/cmpxchg.h>
20 #include <asm/barrier.h>
21 
/* Static initializer for an atomic_t, e.g.: static atomic_t v = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }
23 
24 /*
25  * This Xtensa implementation assumes that the right mechanism
26  * for exclusion is for locking interrupts to level EXCM_LEVEL.
27  *
28  * Locking interrupts looks like this:
29  *
30  *    rsil a15, TOPLEVEL
31  *    <code>
32  *    wsr  a15, PS
33  *    rsync
34  *
35  * Note that a15 is used here because the register allocation
36  * done by the compiler is not guaranteed and a window overflow
37  * may not occur between the rsil and wsr instructions. By using
38  * a15 in the rsil, the machine is guaranteed to be in a state
39  * where no register reference will cause an overflow.
40  */
41 
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 * READ_ONCE() forces a single, untorn load and stops the compiler
 * from caching the value across accesses.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)
49 
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * WRITE_ONCE() forces a single, untorn store that the compiler may
 * not elide or split.
 */
#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
58 
59 #if XCHAL_HAVE_EXCLUSIVE
/*
 * ATOMIC_OP(op) - generate void atomic_<op>(i, v) on cores with the
 * exclusive-access option: L32EX load-exclusives the counter, <op>
 * computes the new value, S32EX attempts the store-exclusive, and
 * GETEX pulls the store's success status into %[result]; BEQZ loops
 * back to 1: until the exclusive store succeeds.
 */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
}									\
/*
 * ATOMIC_OP_RETURN(op) - like ATOMIC_OP, but returns the new counter
 * value.  GETEX clobbers %[result] with the S32EX status, so once the
 * loop exits the <op> is applied one more time to the captured old
 * value in %[tmp] to reconstruct the value that was stored.
 */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return result;							\
}
98 
/*
 * ATOMIC_FETCH_OP(op) - like ATOMIC_OP, but returns the counter value
 * observed by the successful L32EX, i.e. the value *before* <op> was
 * applied (fetch semantics).  %[tmp] survives the loop untouched, so
 * it is returned directly.
 */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return tmp;							\
}
118 
119 #elif XCHAL_HAVE_S32C1I
/*
 * ATOMIC_OP(op) - compare-and-swap loop using S32C1I: load the current
 * counter, latch it into SCOMPARE1, compute the new value, then S32C1I
 * stores it only if the counter still equals SCOMPARE1 and leaves the
 * value it observed in %[result].  BNE retries from 1: if another
 * agent changed the counter in between.
 */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
}									\
138 
/*
 * ATOMIC_OP_RETURN(op) - S32C1I loop as in ATOMIC_OP, but returns the
 * new counter value.  After a successful S32C1I, %[result] holds the
 * old value (it compared equal to %[tmp]), so the <op> is applied one
 * more time to turn it into the value that was stored.
 */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			"       " #op " %[result], %[result], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}
160 
/*
 * ATOMIC_FETCH_OP(op) - S32C1I loop returning the counter value from
 * *before* <op> was applied: after a successful S32C1I, %[result]
 * holds the value the store observed (== the old value), which is
 * returned as-is.
 */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}
181 
182 #else /* XCHAL_HAVE_S32C1I */
183 
/*
 * ATOMIC_OP(op) - fallback for cores with neither exclusive access nor
 * S32C1I: exclusion is achieved purely by raising the interrupt level
 * with RSIL (old PS saved in a15 -- see the comment at the top of this
 * file for why a15 specifically), doing a plain load/<op>/store, then
 * restoring PS and RSYNCing.  NOTE(review): this masks interrupts
 * only, so it presumably relies on such configurations being
 * uniprocessor -- it cannot exclude another core.
 */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			);						\
}									\
201 
/*
 * ATOMIC_OP_RETURN(op) - interrupt-masking fallback returning the new
 * counter value: the <op> result is both stored to memory and kept in
 * %[result] (vval) for the return.
 */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t * v)		\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			);						\
									\
	return vval;							\
}
221 
/*
 * ATOMIC_FETCH_OP(op) - interrupt-masking fallback with fetch
 * semantics: the loaded pre-<op> value stays in %[result] (vval) and
 * is returned, while the <op> result goes through %[tmp] to memory.
 */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned int tmp, vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[tmp], %[result], %[i]\n"	\
			"       s32i    %[tmp], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			);						\
									\
	return vval;							\
}
242 
243 #endif /* XCHAL_HAVE_S32C1I */
244 
/*
 * Instantiate the API from whichever generator set was selected above:
 * add/sub get all three forms (atomic_<op>, atomic_fetch_<op>,
 * atomic_<op>_return).
 */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
/* The bitwise ops get only the void and fetch forms (no _return). */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

/* The generators are implementation detail; drop them from the namespace. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
261 
/*
 * cmpxchg/xchg on the counter word, delegating to the generic
 * cmpxchg()/xchg() from <asm/cmpxchg.h> included above.
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
264 
265 #endif /* _XTENSA_ATOMIC_H */
266