/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform the atomic swap, then flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

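/*
 * A minimal sketch of work-around (1) (the real implementation is the
 * swp_is_buggy cases in __arch_xchg() below): with interrupts masked on
 * a UP system, a plain load/store pair is effectively atomic.
 *
 *	raw_local_irq_save(flags);
 *	ret = *(volatile unsigned long *)ptr;
 *	*(volatile unsigned long *)ptr = new;
 *	raw_local_irq_restore(flags);
 */
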
static inline unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
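	/*
	 * ldrex/strex retry loop: strex writes a non-zero status to %1 if
	 * the exclusive monitor was lost between the load and the store,
	 * in which case the swap is retried from the ldrex.
	 */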
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 2:
		asm volatile("@	__xchg2\n"
		"1:	ldrexh	%0, [%3]\n"
		"	strexh	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

#define arch_xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr),	\
					sizeof(*(ptr)));		\
})
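
/*
 * Usage sketch (hypothetical caller with placeholder names; generic code
 * normally goes through the xchg()/xchg_relaxed() wrappers in
 * <linux/atomic.h> rather than invoking the arch_ macro directly):
 * atomically replace a flag word and act on its previous value.
 *
 *	static unsigned long taken;
 *
 *	if (arch_xchg_relaxed(&taken, 1UL) == 0)
 *		do_first_time_init();
 */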

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define arch_xchg arch_xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__generic_cmpxchg_local((ptr),			\
						(unsigned long)(o),	\
						(unsigned long)(n),	\
						sizeof(*(ptr)));	\
})

#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
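
/*
 * A hedged sketch of what the _local variants are for (hypothetical
 * caller, cnt is a placeholder): updating a counter that only code on
 * the current CPU can race with, so no cross-CPU atomicity is needed.
 *
 *	unsigned long cur = READ_ONCE(*cnt), old;
 *
 *	do {
 *		old = cur;
 *		cur = arch_cmpxchg_local(cnt, old, old + 1);
 *	} while (cur != old);
 */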

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
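	/*
	 * In each case below, res is cleared before the comparison; the
	 * strex*eq only executes (and so can only set res non-zero) when
	 * the old value matched, so the loop retries solely on a failed
	 * exclusive store, and a compare failure falls straight through.
	 */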
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

#define arch_cmpxchg_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
				      (unsigned long)(o),		\
				      (unsigned long)(n),		\
				      sizeof(*(ptr)));			\
})

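/*
 * Usage sketch (hypothetical caller; word and FLAG are placeholders, and
 * generic code normally uses the cmpxchg()/cmpxchg_relaxed() wrappers from
 * <linux/atomic.h>): the classic compare-and-swap update loop on a
 * 32-bit word.
 *
 *	unsigned int cur = READ_ONCE(*word), old;
 *
 *	do {
 *		old = cur;
 *		cur = arch_cmpxchg_relaxed(word, old, old | FLAG);
 *	} while (cur != old);
 */
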
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __generic_cmpxchg_local(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
					(unsigned long)(o),		\
					(unsigned long)(n),		\
					sizeof(*(ptr)));		\
})

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

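	/*
	 * %1/%H1 (and %4/%H4, %5/%H5) name the two registers holding each
	 * 64-bit operand for ldrexd/strexd; the retry on a non-zero strexd
	 * status mirrors the 32-bit cases above.
	 */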
	__asm__ __volatile__(
	"1:	ldrexd	%1, %H1, [%3]\n"
	"	teq	%1, %4\n"
	"	teqeq	%H1, %H4\n"
	"	bne	2f\n"
	"	strexd	%0, %5, %H5, [%3]\n"
	"	teq	%0, #0\n"
	"	bne	1b\n"
	"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

#define arch_cmpxchg64_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))

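/*
 * Usage sketch (hypothetical caller; seq is a placeholder and must point
 * at a naturally aligned 64-bit location, as ldrexd/strexd require):
 * advance a 64-bit sequence counter.
 *
 *	unsigned long long cur = READ_ONCE(*seq), old;
 *
 *	do {
 *		old = cur;
 *		cur = arch_cmpxchg64_relaxed(seq, old, old + 1);
 *	} while (cur != old);
 */
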
#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif	/* __ASM_ARM_CMPXCHG_H */