/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
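
/*
 * Illustrative sketch (not part of this header): every ldrex/strex loop
 * below follows the same pattern. ldrex loads the value and claims the
 * exclusive monitor for the address; strex stores only if the monitor is
 * still held and returns 0 on success, non-zero if another agent
 * intervened, in which case the update is retried. In pseudo-C, where
 * ldrex()/strex() stand for the instructions and are not real functions:
 *
 *	do {
 *		old = ldrex(&v->counter);		// load, claim monitor
 *		fail = strex(&v->counter, old + i);	// store iff still held
 *	} while (fail);					// retry on contention
 */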

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed
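
/*
 * Example (a sketch, not part of this header): the classic
 * compare-and-swap update loop built on the primitive above. The
 * generic atomic layer derives the fully ordered atomic_cmpxchg()
 * from this _relaxed variant; example_bounded_inc() is a hypothetical
 * name used only for illustration.
 *
 *	static inline bool example_bounded_inc(atomic_t *v, int limit)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < limit) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *			if (seen == old)
 *				return true;	// our update won the race
 *			old = seen;		// someone beat us; retry
 *		}
 *		return false;			// already at the limit
 *	}
 */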

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless
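
/*
 * Example usage (sketch): the generic atomic_inc_not_zero() maps onto
 * this primitive - add 1 unless the counter is 0:
 *
 *	if (atomic_fetch_add_unless(&v, 1, 0) != 0)
 *		// the counter was non-zero and has been incremented
 *
 * Note the conditional smp_mb() above: the operation is only fully
 * ordered when the add actually happens; a failed conditional atomic
 * is allowed to be unordered.
 */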

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
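
/*
 * For reference, ATOMIC_OPS(add, +=, add) above expands to three
 * functions (named for the ARMv6+ case; the pre-v6 fallbacks drop the
 * _relaxed suffix and use the irq-disabling versions instead):
 *
 *	arch_atomic_add(i, v)			- no return value
 *	arch_atomic_add_return_relaxed(i, v)	- returns the new value
 *	arch_atomic_fetch_add_relaxed(i, v)	- returns the old value
 */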

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)
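
/*
 * Example usage (sketch): the andnot ops map onto the ARM "bic"
 * instruction and are convenient for clearing flag bits, e.g.:
 *
 *	old = atomic_fetch_andnot(FLAG_PENDING, &state);
 *	if (old & FLAG_PENDING)
 *		// this CPU is the one that cleared the flag
 *
 * where FLAG_PENDING and state are hypothetical names.
 */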

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
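
/*
 * Note on the split above: with LPAE, ldrd/strd to a naturally aligned
 * 64-bit location are single-copy atomic, so a plain load or store
 * suffices. Without LPAE they may be torn, so even atomic64_set() has
 * to go through an ldrexd/strexd loop to remain atomic with respect to
 * concurrent exclusive-access sequences.
 */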

#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed	arch_atomic64_xchg_relaxed

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
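
/*
 * Example usage (sketch): dec_if_positive only commits the decrement
 * when the result stays non-negative, so it can drive a counter that
 * must never drop below zero ("avail" is a hypothetical atomic64_t):
 *
 *	if (atomic64_dec_if_positive(&avail) < 0)
 *		// nothing available; the counter was left untouched
 */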

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif