/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

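/*
 * Example (illustrative only): an atomic_t is initialised either statically
 * with ATOMIC_INIT() or at run time with atomic_set(), and sampled with
 * atomic_read():
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 0);
 *	if (atomic_read(&nr_events) > 0)
 *		handle_pending_events();
 */
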
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
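/*
 * Conceptually (sketch only, the real work is done in inline assembly),
 * each routine below is a retry loop of this shape:
 *
 *	do {
 *		old = load_exclusive(&v->counter);
 *		new = old <op> i;
 *	} while (store_exclusive(&v->counter, new) fails);
 */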
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
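
/*
 * Example (illustrative only): callers typically wrap atomic_cmpxchg() in a
 * retry loop, re-reading the counter until the exchange succeeds:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 *
 * __atomic_add_unless() further down follows the same pattern.
 */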

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
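
/*
 * Pre-ARMv6 cores have no load/store-exclusive instructions, so on these
 * (necessarily UP) systems the atomics below simply disable interrupts
 * around a plain read-modify-write.
 */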

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
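
/*
 * Example (illustrative only): __atomic_add_unless() returns the value the
 * counter held before the (possibly skipped) addition, so a caller can tell
 * whether the add actually happened, e.g. a "take a reference only if the
 * object is still live" helper:
 *
 *	static inline int my_get_ref(atomic_t *refcount)
 *	{
 *		return __atomic_add_unless(refcount, 1, 0) != 0;
 *	}
 */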

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
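
/*
 * Example (illustrative only): atomic_dec_and_test() is the usual building
 * block for reference counting; the caller that drops the last reference
 * sees it return true and performs the cleanup:
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		free_object(obj);
 */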

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
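/*
 * 64-bit atomics, implemented with ldrexd/strexd.  The counter must be
 * 8-byte aligned for the doubleword-exclusive accesses to be atomic, hence
 * the explicit __aligned(8).
 */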
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
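
/*
 * Example (illustrative only): atomic64_dec_if_positive() returns the new
 * value after the decrement, or a negative value when the counter was
 * already zero (in which case it is left untouched), e.g.:
 *
 *	if (atomic64_dec_if_positive(&quota) < 0)
 *		return -EBUSY;
 */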

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
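
/*
 * Example (illustrative only): a 64-bit statistics counter that must not
 * wrap at 4G, such as a per-device byte count:
 *
 *	static atomic64_t rx_bytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &rx_bytes);
 *	total = atomic64_read(&rx_bytes);
 */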

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */