#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();
	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

#define xchg(ptr, x) ({							\
	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
				   sizeof(*(ptr)));			\
})
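
/*
 * Illustrative use of xchg() (a sketch only, not part of this header):
 * xchg() returns the previous value, which is enough to build a crude
 * test-and-set spin lock.  The names below are hypothetical, and
 * cpu_relax() comes from <asm/processor.h>.
 *
 *	static volatile unsigned long example_lock_word;
 *
 *	static void example_lock(void)
 *	{
 *		while (xchg(&example_lock_word, 1UL) != 0UL)
 *			cpu_relax();	// spin until we observe 0 -> 1
 *	}
 *
 *	static void example_unlock(void)
 *	{
 *		xchg(&example_lock_word, 0UL);	// ordered by the smp_mb() pair
 *	}
 */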

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof__(*(ptr)))__cmpxchg_local_generic((ptr),		\
					        (unsigned long)(o),	\
					        (unsigned long)(n),	\
					        sizeof(*(ptr)));	\
})

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
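
/*
 * Illustrative use of cmpxchg_local() (a sketch only): because it is
 * atomic only with respect to the current CPU, it suits data never
 * touched by another processor, e.g. a counter that an interrupt
 * handler on the same CPU may also update.  If an interrupt modifies
 * the value between the load and the cmpxchg_local(), the compare
 * fails and the loop retries.  The names below are hypothetical.
 *
 *	static void example_count_event(unsigned long *percpu_ctr)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = *percpu_ctr;
 *		} while (cmpxchg_local(percpu_ctr, old, old + 1) != old);
 *	}
 */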

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6; ARMv6K and later
 * also provide the 8-bit and 16-bit cases below.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}
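
/*
 * A readable model of what each ldrex/strex loop above does (a sketch
 * for readers, not compilable kernel code): the load-exclusive marks
 * the address, and the store-exclusive only succeeds (res == 0) if no
 * other observer touched it in between; otherwise the loop retries.
 *
 *	// executed as if in one atomic step:
 *	unsigned long model_cmpxchg(unsigned long *p, unsigned long old,
 *				    unsigned long new)
 *	{
 *		unsigned long val = *p;		// ldrex
 *
 *		if (val == old)
 *			*p = new;		// strexeq
 *		return val;			// caller checks val == old
 *	}
 */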

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr, o, n) ({						\
	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),				\
					 (unsigned long)(o),		\
					 (unsigned long)(n),		\
					 sizeof(*(ptr)));		\
})
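
/*
 * Illustrative use of cmpxchg() (a sketch only): the classic
 * compare-and-swap retry loop, here a saturating increment.  The
 * function name is hypothetical.
 *
 *	static unsigned int example_inc_saturating(unsigned int *v)
 *	{
 *		unsigned int old, new;
 *
 *		do {
 *			old = *v;
 *			if (old == ~0U)
 *				return old;	// already saturated
 *			new = old + 1;
 *		} while (cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */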

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				        (unsigned long)(o),		\
				        (unsigned long)(n),		\
				        sizeof(*(ptr)));		\
})

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

#define cmpxchg64_relaxed(ptr, o, n) ({					\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr, o, n) ({						\
	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					   (unsigned long long)(o),	\
					   (unsigned long long)(n));	\
})
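
/*
 * Illustrative use of cmpxchg64() (a sketch only): advancing a 64-bit
 * sequence counter atomically on a 32-bit CPU, where a plain 64-bit
 * load or store would not be atomic.  The name is hypothetical.
 *
 *	static unsigned long long example_advance(unsigned long long *seq)
 *	{
 *		unsigned long long old, new;
 *
 *		do {
 *			old = *seq;	// may tear; cmpxchg64() then fails
 *			new = old + 1;
 *		} while (cmpxchg64(seq, old, new) != old);
 *
 *		return new;
 *	}
 */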

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */