/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
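
/*
 * Note that atomic_add_return() (and atomic_sub_return() below) wrap the
 * exclusive sequence in smp_mb(), so they behave as full memory barriers,
 * while the void atomic_add()/atomic_sub() variants imply no ordering at
 * all.  A minimal, purely illustrative sketch of where that matters
 * (example_data and compute() are hypothetical, not part of this header):
 *
 *	static atomic_t nr_ready = ATOMIC_INIT(0);
 *	static int example_data;
 *
 *	// producer: the barrier issued before the ldrex/strex sequence
 *	// keeps the store to example_data ordered before the counter
 *	// update becomes visible.
 *	example_data = compute();
 *	atomic_add_return(1, &nr_ready);
 *
 *	// a consumer still needs its own barrier (or a *_return op)
 *	// between reading the counter and reading example_data.
 */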

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

/*
 * If the counter equals 'old', atomically replace it with 'new'.
 * Returns the value that was actually observed, so the caller can
 * tell whether the exchange took place.
 */
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
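
/*
 * Illustrative sketch (not part of this header): lock-free updates are
 * usually built on atomic_cmpxchg() with a retry loop that re-reads the
 * counter whenever another CPU changed it underneath us.
 * atomic_add_unless() further down is exactly this pattern; a hypothetical
 * saturating increment might look like:
 *
 *	static inline void example_inc_saturating(atomic_t *v, int max)
 *	{
 *		int old, cur = atomic_read(v);
 *
 *		while (cur < max) {
 *			old = atomic_cmpxchg(v, cur, cur + 1);
 *			if (old == cur)
 *				break;		// exchange succeeded
 *			cur = old;		// lost a race, retry
 *		}
 *	}
 */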

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
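
/*
 * Note that atomic_clear_mask() operates on a plain unsigned long rather
 * than an atomic_t.  A minimal, hypothetical usage sketch:
 *
 *	static unsigned long example_flags;
 *	#define EXAMPLE_BUSY	(1UL << 0)
 *
 *	// atomically clear the BUSY bit, leaving the other bits intact
 *	atomic_clear_mask(EXAMPLE_BUSY, &example_flags);
 */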

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * Pre-ARMv6 CPUs lack the exclusive load/store instructions, so on these
 * (uniprocessor only) systems atomicity is provided by briefly disabling
 * interrupts around the read-modify-write sequence.
 */
#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * Add 'a' to v, but only if v is not already 'u'.  Returns non-zero if
 * the addition was performed.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
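
/*
 * atomic_add_unless()/atomic_inc_not_zero() are the usual building blocks
 * for "take a reference only if the object is still live" schemes.  A
 * minimal sketch under that assumption (example_obj is hypothetical, not
 * part of this header):
 *
 *	struct example_obj {
 *		atomic_t refcount;
 *	};
 *
 *	static inline int example_obj_get(struct example_obj *obj)
 *	{
 *		// fails (returns 0) once the last reference has been dropped
 *		return atomic_inc_not_zero(&obj->refcount);
 *	}
 */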

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
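
/*
 * The smp_mb__{before,after}_atomic_{dec,inc}() hooks expand to a full
 * smp_mb() here because atomic_inc()/atomic_dec() themselves are not
 * guaranteed to order surrounding memory accesses.  A typical, purely
 * illustrative pairing (obj, dead and refcount are hypothetical):
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();	// make the store visible first
 *	atomic_dec(&obj->refcount);	// then drop the reference
 */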

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */