/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
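/*
 * Illustrative sketch only, not compiled: the ldrex/strex-based ops
 * generated below are roughly equivalent to the following C-level retry
 * loop, where __ldrex()/__strex() are stand-ins for the exclusive
 * load/store pair rather than real kernel helpers:
 *
 *	do {
 *		old = __ldrex(&v->counter);	// load; arms the exclusive monitor
 *		new = old + i;			// e.g. asm_op == add
 *	} while (__strex(new, &v->counter));	// non-zero if the monitor was lost
 *
 * strex reports failure when another observer touched the location (or
 * an exception cleared the monitor) between the ldrex and the strex, in
 * which case the update is retried.
 */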
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed
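/*
 * Usage sketch (illustrative only): a lock-free increment-if-nonzero
 * can be built from the relaxed cmpxchg above in the usual way:
 *
 *	int old = arch_atomic_read(v);
 *	while (old != 0) {
 *		int seen = arch_atomic_cmpxchg_relaxed(v, old, old + 1);
 *		if (seen == old)
 *			break;		// store happened
 *		old = seen;		// raced; retry with the observed value
 *	}
 *
 * Only the _relaxed variant is implemented here; fully ordered forms
 * are composed by the generic atomic layer, which wraps this primitive
 * in the required barriers. Likewise, arch_atomic_fetch_add_unless()
 * below issues its trailing smp_mb() only when the add was performed,
 * since a failed conditional operation does not need to be ordered.
 */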
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */
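/*
 * ATOMIC_OPS() stamps out the whole family for one operation from the
 * building blocks above. For example, ATOMIC_OPS(add, +=, add) expands
 * to (sketch):
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v);
 *	static inline int arch_atomic_add_return[_relaxed](int i, atomic_t *v);
 *	static inline int arch_atomic_fetch_add[_relaxed](int i, atomic_t *v);
 *
 * with the _relaxed suffix present on ARMv6+ and absent from the
 * pre-ARMv6 irq-disabling versions, which are trivially ordered on UP.
 */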
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
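/*
 * Why two flavours above: with LPAE, a doubleword ldrd/strd to a
 * 64-bit aligned address is architecturally single-copy atomic, so
 * plain loads and stores suffice. Without LPAE only ldrexd/strexd give
 * that guarantee, hence the retry loop in the non-LPAE atomic64_set().
 * C-level model (assumed __ldrexd()/__strexd() stand-ins, sketch only):
 *
 *	do {
 *		(void)__ldrexd(&v->counter);	// arm the exclusive monitor
 *	} while (__strexd(i, &v->counter));	// store both words, or retry
 */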
#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)
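/*
 * A 64-bit value occupies a register pair: %Q names the register
 * holding the least significant word and %R the most significant one.
 * Arithmetic is therefore split across two instructions with the carry
 * threaded between them, e.g. for add:
 *
 *	adds	%Q0, %Q0, %Q4	@ low word, sets the carry flag
 *	adc	%R0, %R0, %R4	@ high word, consumes the carry
 *
 * hence the (adds, adc) and (subs, sbc) pairs below; bitwise ops have
 * no carry and simply use the same mnemonic for both halves.
 */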
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd	%1, %H1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"teqeq	%H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed	arch_atomic64_xchg_relaxed
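/*
 * arch_atomic64_dec_if_positive() below decrements the counter unless
 * the result would be negative; the (possibly negative, unstored)
 * decremented value is returned either way. Roughly (sketch only,
 * ignoring atomicity and the surrounding barriers):
 *
 *	s64 new = v->counter - 1;
 *	if (new >= 0)
 *		v->counter = new;	// done via strexd in the real code
 *	return new;
 *
 * Callers treat a negative return as "the counter was already zero or
 * below".
 */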
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif