/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>
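
/*
 * __xchg: unconditionally store @x at *ptr and return the value that
 * was there before. Each size variant is a load-exclusive/
 * store-exclusive retry loop: LDAXR (load-acquire exclusive) marks the
 * address for exclusive access, STLXR (store-release exclusive)
 * succeeds only if no other observer has written to it in between, and
 * CBNZ retries the whole sequence when the store fails.
 */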
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile("//	__xchg1\n"
		"1:	ldaxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 2:
		asm volatile("//	__xchg2\n"
		"1:	ldaxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 4:
		asm volatile("//	__xchg4\n"
		"1:	ldaxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 8:
		asm volatile("//	__xchg8\n"
		"1:	ldaxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
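
/*
 * Example use of xchg() (illustrative sketch only; 'pending' is a
 * hypothetical variable, not part of this header). A worker can claim
 * every pending bit in one atomic step:
 *
 *	static unsigned long pending;
 *
 *	unsigned long work = xchg(&pending, 0UL);
 *
 * The cast through __typeof__(*(ptr)) means xchg() works on any 1-,
 * 2-, 4- or 8-byte scalar; other sizes hit BUILD_BUG() at compile
 * time.
 */

/*
 * __cmpxchg: if *ptr holds @old, replace it with @new; either way,
 * return the value that was actually read. This variant uses relaxed
 * LDXR/STXR with no implied memory barriers, which is exactly what
 * cmpxchg_local() wants. The do/while loop only retries when the
 * store-exclusive fails spuriously: on a value mismatch the b.ne to
 * label 1 leaves @res at zero and the loop exits.
 */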
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval = 0, res;

	switch (size) {
	case 1:
		do {
			asm volatile("// __cmpxchg1\n"
			"	ldxrb	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrb	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 2:
		do {
			asm volatile("// __cmpxchg2\n"
			"	ldxrh	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrh	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 4:
		do {
			asm volatile("// __cmpxchg4\n"
			"	ldxr	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 8:
		do {
			asm volatile("// __cmpxchg8\n"
			"	ldxr	%1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%1, %3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	default:
		BUILD_BUG();
	}

	return oldval;
}
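
/*
 * __cmpxchg_mb: the fully ordered variant backing cmpxchg(). The
 * unordered __cmpxchg() above is bracketed by a pair of smp_mb() calls
 * so that the whole operation acts as a full memory barrier.
 */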
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))
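
/*
 * Example use of cmpxchg() (illustrative sketch only; 'refs' and the
 * saturation policy are made up, not part of this header): a lock-free
 * saturating increment built from a compare-and-swap retry loop.
 *
 *	static unsigned int refs;
 *
 *	unsigned int old, cur = refs;
 *	while (cur != UINT_MAX) {
 *		old = cmpxchg(&refs, cur, cur + 1);
 *		if (old == cur)
 *			break;		(our update won)
 *		cur = old;		(lost a race; retry with new value)
 *	}
 */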

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr),				\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))
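
/*
 * cmpxchg_local() maps onto the unordered __cmpxchg() above: the
 * update is still atomic, but no memory barriers are implied.
 */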

#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
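
/*
 * unsigned long is 64 bits on arm64, so the 64-bit variants can simply
 * alias cmpxchg() and cmpxchg_local().
 */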

#endif	/* __ASM_CMPXCHG_H */