/*
 * Copyright (C) 2013 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <linux/preempt.h>

#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>

static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
				 "msr tpidr_el2, %0",
				 ARM64_HAS_VIRT_HOST_EXTN)
			:: "r" (off) : "memory");
}
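
/*
 * The offset is installed once per CPU during early bringup; a sketch of
 * a typical caller (not code from this file):
 *
 *	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 *
 * Later reads through __my_cpu_offset() then return this CPU's delta
 * into the per-cpu area.
 */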

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm(ALTERNATIVE("mrs %0, tpidr_el1",
			"mrs %0, tpidr_el2",
			ARM64_HAS_VIRT_HOST_EXTN)
		: "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
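
/*
 * The generic layer (asm-generic/percpu.h, included at the bottom of this
 * file) consumes the offset above; roughly (a sketch, not the literal
 * generic code):
 *
 *	#define arch_raw_cpu_ptr(ptr) \
 *		SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 *
 * i.e. a pointer to this CPU's copy of a per-cpu variable is formed by
 * adding the value cached in TPIDR_EL1 (or TPIDR_EL2 with VHE, see
 * ARM64_HAS_VIRT_HOST_EXTN) to the variable's link-time address.
 */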

#define PERCPU_OP(op, asm_op)						\
static inline unsigned long __percpu_##op(void *ptr,			\
			unsigned long val, int size)			\
{									\
	unsigned long loop, ret;					\
									\
	switch (size) {							\
	case 1:								\
		asm ("//__per_cpu_" #op "_1\n"				\
		"1:	ldxrb	  %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
		"	stxrb	  %w[loop], %w[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u8 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	case 2:								\
		asm ("//__per_cpu_" #op "_2\n"				\
		"1:	ldxrh	  %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
		"	stxrh	  %w[loop], %w[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u16 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	case 4:								\
		asm ("//__per_cpu_" #op "_4\n"				\
		"1:	ldxr	  %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
		"	stxr	  %w[loop], %w[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u32 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	case 8:								\
		asm ("//__per_cpu_" #op "_8\n"				\
		"1:	ldxr	  %[ret], %[ptr]\n"			\
			#asm_op " %[ret], %[ret], %[val]\n"		\
		"	stxr	  %w[loop], %[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u64 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	default:							\
		ret = 0;						\
		BUILD_BUG();						\
	}								\
									\
	return ret;							\
}

PERCPU_OP(add, add)
PERCPU_OP(and, and)
PERCPU_OP(or, orr)
#undef PERCPU_OP
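
/*
 * PERCPU_OP(add, add) above generates __percpu_add(); a rough C-level
 * sketch of its 8-byte case (ldxr/stxr shown as hypothetical __ldxr()/
 * __stxr() helpers):
 *
 *	do {
 *		ret = __ldxr(ptr);		// load-exclusive
 *		ret += val;
 *	} while (__stxr(ptr, ret));	// store-exclusive, retry on failure
 *
 * The returned value is the new (post-op) value, which is why
 * _percpu_add_return() further down can simply reuse _percpu_add().
 */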

static inline unsigned long __percpu_read(void *ptr, int size)
{
	unsigned long ret;

	switch (size) {
	case 1:
		ret = READ_ONCE(*(u8 *)ptr);
		break;
	case 2:
		ret = READ_ONCE(*(u16 *)ptr);
		break;
	case 4:
		ret = READ_ONCE(*(u32 *)ptr);
		break;
	case 8:
		ret = READ_ONCE(*(u64 *)ptr);
		break;
	default:
		ret = 0;
		BUILD_BUG();
	}

	return ret;
}

static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
	switch (size) {
	case 1:
		WRITE_ONCE(*(u8 *)ptr, (u8)val);
		break;
	case 2:
		WRITE_ONCE(*(u16 *)ptr, (u16)val);
		break;
	case 4:
		WRITE_ONCE(*(u32 *)ptr, (u32)val);
		break;
	case 8:
		WRITE_ONCE(*(u64 *)ptr, (u64)val);
		break;
	default:
		BUILD_BUG();
	}
}
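
/*
 * The wrappers further down dispatch on sizeof(); e.g. reading a per-cpu
 * u32 called "foo" (an illustrative name) boils down to roughly:
 *
 *	preempt_disable_notrace();
 *	val = READ_ONCE(*(u32 *)raw_cpu_ptr(&foo));
 *	preempt_enable_notrace();
 *
 * READ_ONCE()/WRITE_ONCE() stop the compiler from tearing or caching the
 * access, and the switch on size lets BUILD_BUG() reject unsupported
 * widths at compile time.
 */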

static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
						int size)
{
	unsigned long ret, loop;

	switch (size) {
	case 1:
		asm ("//__percpu_xchg_1\n"
		"1:	ldxrb	%w[ret], %[ptr]\n"
		"	stxrb	%w[loop], %w[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u8 *)ptr)
		: [val] "r" (val));
		break;
	case 2:
		asm ("//__percpu_xchg_2\n"
		"1:	ldxrh	%w[ret], %[ptr]\n"
		"	stxrh	%w[loop], %w[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u16 *)ptr)
		: [val] "r" (val));
		break;
	case 4:
		asm ("//__percpu_xchg_4\n"
		"1:	ldxr	%w[ret], %[ptr]\n"
		"	stxr	%w[loop], %w[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u32 *)ptr)
		: [val] "r" (val));
		break;
	case 8:
		asm ("//__percpu_xchg_8\n"
		"1:	ldxr	%[ret], %[ptr]\n"
		"	stxr	%w[loop], %[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u64 *)ptr)
		: [val] "r" (val));
		break;
	default:
		ret = 0;
		BUILD_BUG();
	}

	return ret;
}
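
/*
 * C-level sketch of __percpu_xchg() (same exclusive-monitor retry pattern
 * as PERCPU_OP, but the caller's value is stored and the old value is
 * returned; __ldxr()/__stxr() are again only placeholders for ldxr/stxr):
 *
 *	do {
 *		ret = __ldxr(ptr);		// old value
 *	} while (__stxr(ptr, val));	// try to install new value
 *	return ret;
 */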

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
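
/*
 * Typical use, assuming a per-cpu flag declared elsewhere as
 * DEFINE_PER_CPU(int, foo_state) (an illustrative name):
 *
 *	if (this_cpu_cmpxchg(foo_state, 0, 1) == 0) {
 *		// this CPU won the 0 -> 1 transition
 *	}
 *
 * _protect_cmpxchg_local() only needs to disable preemption: the access
 * is CPU-local, so the barrier-free cmpxchg_local() is sufficient.
 */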

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})

#define _percpu_read(pcp)						\
({									\
	typeof(pcp) __retval;						\
	preempt_disable_notrace();					\
	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
					      sizeof(pcp));		\
	preempt_enable_notrace();					\
	__retval;							\
})

#define _percpu_write(pcp, val)						\
do {									\
	preempt_disable_notrace();					\
	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
				sizeof(pcp));				\
	preempt_enable_notrace();					\
} while (0)

#define _pcp_protect(operation, pcp, val)			\
({								\
	typeof(pcp) __retval;					\
	preempt_disable();					\
	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),	\
					  (val), sizeof(pcp));	\
	preempt_enable();					\
	__retval;						\
})
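
/*
 * _pcp_protect() is the common wrapper for the helpers below; e.g.
 * _percpu_add(pcp, val) expands roughly to (a sketch, not the literal
 * preprocessor output):
 *
 *	preempt_disable();
 *	__retval = (typeof(pcp))__percpu_add(raw_cpu_ptr(&(pcp)),
 *					     (val), sizeof(pcp));
 *	preempt_enable();
 *
 * Disabling preemption pins the task to the current CPU for the duration
 * of the operation, so raw_cpu_ptr() keeps pointing at this CPU's copy.
 */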

#define _percpu_add(pcp, val) \
	_pcp_protect(__percpu_add, pcp, val)

#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

#define _percpu_and(pcp, val) \
	_pcp_protect(__percpu_and, pcp, val)

#define _percpu_or(pcp, val) \
	_pcp_protect(__percpu_or, pcp, val)

#define _percpu_xchg(pcp, val) (typeof(pcp)) \
	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))

#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)

#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)

#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)

#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)

#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)

#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)

#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
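
/*
 * Putting it together, a caller elsewhere in the kernel might do
 * (names are illustrative only):
 *
 *	DEFINE_PER_CPU(unsigned long, foo_events);
 *
 *	void foo_account_event(void)
 *	{
 *		this_cpu_add(foo_events, 1);
 *	}
 *
 *	unsigned long foo_read_this_cpu(void)
 *	{
 *		return this_cpu_read(foo_events);
 *	}
 *
 * The generic per-cpu code (linux/percpu-defs.h) routes this_cpu_add()
 * and this_cpu_read() to the sized this_cpu_*_{1,2,4,8}() helpers above.
 */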

#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */