/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
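
/*
 * A rough C-level picture of the ldrex/strex retry loops below (an
 * illustrative sketch only -- load_exclusive()/store_exclusive() are
 * made-up names standing in for the ldrex/strex instructions, and the
 * hardware exclusive monitor, not a flag variable, decides whether the
 * store succeeds):
 *
 *        do {
 *                old = load_exclusive(&v->counter);            // ldrex
 *                new = old + i;
 *                failed = store_exclusive(new, &v->counter);   // strex
 *        } while (failed);
 *
 * If another observer touches the location between the exclusive load
 * and the exclusive store, the store fails and the loop retries.
 */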
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%2]\n"
"       add     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%2]\n"
"       add     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub\n"
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long oldval, res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%2]\n"
                "mov    %0, #0\n"
                "teq    %1, %3\n"
                "strexeq %0, %4, [%2]\n"
                    : "=&r" (res), "=&r" (oldval)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        smp_mb();

        return oldval;
}
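
/*
 * Typical caller-side use of atomic_cmpxchg() (a minimal sketch, not part
 * of this header; some_transform() is a hypothetical stand-in for whatever
 * new value the caller wants to install):
 *
 *        int old, new;
 *
 *        do {
 *                old = atomic_read(v);
 *                new = some_transform(old);
 *        } while (atomic_cmpxchg(v, old, new) != old);
 */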

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, [%2]\n"
"       bic     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (addr), "Ir" (mask)
        : "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_add(i, v)        (void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        *addr &= ~mask;
        raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
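
/*
 * atomic_add_unless()/atomic_inc_not_zero() are the usual building blocks
 * for "take a reference only while the object is still live" schemes.  A
 * minimal sketch (obj, obj->refs and free_my_obj() are hypothetical names,
 * purely for illustration):
 *
 *        if (!atomic_inc_not_zero(&obj->refs))
 *                return NULL;                  // already being torn down
 *        ...use obj...
 *        if (atomic_dec_and_test(&obj->refs))
 *                free_my_obj(obj);
 */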

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        u64 __aligned(8) counter;
} atomic64_t;
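
/*
 * Note: the __aligned(8) above matters -- ldrexd/strexd expect a
 * doubleword-aligned address, so the 64-bit counter must not straddle an
 * unaligned boundary.
 */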

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(atomic64_t *v)
{
        u64 result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
        u64 tmp;

        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%1]\n"
"       strexd  %0, %2, %H2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_add\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       adds    %0, %0, %3\n"
"       adc     %H0, %H0, %H3\n"
"       strexd  %1, %0, %H0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_return\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       adds    %0, %0, %3\n"
"       adc     %H0, %H0, %H3\n"
"       strexd  %1, %0, %H0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_sub\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       subs    %0, %0, %3\n"
"       sbc     %H0, %H0, %H3\n"
"       strexd  %1, %0, %H0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_sub_return\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       subs    %0, %0, %3\n"
"       sbc     %H0, %H0, %H3\n"
"       strexd  %1, %0, %H0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
        u64 oldval;
        unsigned long res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%2]\n"
                "mov            %0, #0\n"
                "teq            %1, %3\n"
                "teqeq          %H1, %H3\n"
                "strexdeq       %0, %4, %H4, [%2]"
                : "=&r" (res), "=&r" (oldval)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        smp_mb();

        return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %1, %3, %H3, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        smp_mb();

        return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       subs    %0, %0, #1\n"
"       sbc     %H0, %H0, #0\n"
"       teq     %H0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
        u64 val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       teq     %0, %4\n"
"       teqeq   %H0, %H4\n"
"       moveq   %1, #0\n"
"       beq     2f\n"
"       adds    %0, %0, %5\n"
"       adc     %H0, %H0, %H5\n"
"       strexd  %2, %0, %H0, [%3]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (val), "=&r" (ret), "=&r" (tmp)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#else /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic64.h>
#endif
#include <asm-generic/atomic-long.h>
#endif
#endif