/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <asm/system.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

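/*
 * A naturally aligned 32-bit load is atomic on ARM, so atomic_read()
 * needs nothing beyond the volatile qualifier on 'counter' to force a
 * fresh load from memory.
 */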
#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
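/*
 * Illustrative C-level sketch (not compiled; load_exclusive() and
 * store_exclusive() are hypothetical stand-ins for the instructions)
 * of the pattern every ldrex/strex loop below follows:
 *
 *	do {
 *		tmp = load_exclusive(&v->counter);    (ldrex: load and
 *						       mark exclusive)
 *		tmp = <modified tmp>;
 *	} while (store_exclusive(tmp, &v->counter)); (strex: 0 on success,
 *						      non-zero if exclusivity
 *						      was lost, so retry)
 *
 * atomic_set() reuses the same loop, presumably so that the store
 * interacts with concurrent ldrex/strex sequences through the exclusive
 * monitor rather than as a bare str.
 */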
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

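/*
 * atomic_cmpxchg() returns the value the counter held at the time of
 * the attempt; the exchange happened iff that value equals 'old'.
 * Illustrative use, claiming a one-shot flag:
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	if (atomic_cmpxchg(&claimed, 0, 1) == 0) {
 *		... this caller saw 0 and installed 1: it owns the flag ...
 *	}
 */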
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}

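/*
 * Note that atomic_clear_mask() operates on a plain word, not an
 * atomic_t; it atomically clears the bits in 'mask' from '*addr'.
 */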
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* __LINUX_ARM_ARCH__ < 6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

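/*
 * Pre-ARMv6 cores have no exclusive load/store instructions, so these
 * helpers get their atomicity by disabling interrupts around a plain
 * read-modify-write.  That is only safe on uniprocessor systems, hence
 * the #error above for SMP builds.
 */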
#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ >= 6 */

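/*
 * atomic_xchg() is built on the generic xchg() helper, which on ARM
 * comes from <asm/system.h> and selects an implementation suited to
 * the CPU being built for.
 */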
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
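
/*
 * atomic_add_unless() adds 'a' unless the counter equals 'u' and
 * returns non-zero iff the add happened.  Illustrative use of the
 * derived atomic_inc_not_zero() in a lookup-side refcount (the
 * 'obj->refcnt' field is hypothetical):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	... object is already being torn down ...
 */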

#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)

/*
 * Atomic operations are already serializing on ARM, so these need
 * only be compiler barriers.
 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */