#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
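
/*
 * Illustrative sketch only (not part of the original header): a minimal,
 * hypothetical use of xchg(), which atomically stores a new value and
 * returns the previous contents of the location.
 */
#if 0
static inline int example_test_and_set(unsigned long *flag)
{
	/* Store 1; we won the flag iff the old value was 0. */
	return xchg(flag, 1UL) == 0;
}
#endif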

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))
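
/*
 * Illustrative sketch only (not part of the original header): a minimal,
 * hypothetical lock-free increment built on cmpxchg().  cmpxchg() returns
 * the value actually found at *ptr, so the update succeeded only when that
 * matches the expected old value.
 */
#if 0
static inline unsigned long example_atomic_inc(unsigned long *counter)
{
	unsigned long old;

	do {
		old = *counter;
	} while (cmpxchg(counter, old, old + 1) != old);

	return old + 1;
}
#endif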

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))
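
/*
 * Illustrative sketch only (not part of the original header): cmpxchg_local()
 * has the same interface as cmpxchg() but issues no smp_mb() barriers and is
 * only atomic with respect to the current CPU, e.g. for data shared between
 * task and interrupt context on one CPU.  Names below are hypothetical.
 */
#if 0
static inline void example_local_add(unsigned long *stat, unsigned long val)
{
	unsigned long old;

	do {
		old = *stat;
	} while (cmpxchg_local(stat, old, old + val) != old);
}
#endif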

#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */

/*
 * Note: ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))
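
/*
 * Illustrative sketch only (not part of the original header): a minimal,
 * hypothetical compare-and-swap on a 64-bit quantity.  cmpxchg64() uses
 * ldrexd/strexd, so the location should be naturally aligned to 8 bytes.
 */
#if 0
static inline int example_update_u64(unsigned long long *p,
				     unsigned long long old,
				     unsigned long long new)
{
	/* Non-zero if *p held old and has been replaced by new. */
	return cmpxchg64(p, old, new) == old;
}
#endif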

#else /* min ARCH = ARMv6 */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */