/*
 * Copyright (C) 2013 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <linux/preempt.h>

#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>

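/*
 * The per-CPU offset is stashed in a software thread ID register:
 * TPIDR_EL1 normally, or TPIDR_EL2 when the kernel runs at EL2 with the
 * Virtualization Host Extensions (ARM64_HAS_VIRT_HOST_EXTN). The choice
 * is patched in at boot via the alternatives framework.
 */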
static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
				 "msr tpidr_el2, %0",
				 ARM64_HAS_VIRT_HOST_EXTN)
			:: "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm(ALTERNATIVE("mrs %0, tpidr_el1",
			"mrs %0, tpidr_el2",
			ARM64_HAS_VIRT_HOST_EXTN)
		: "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()

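/*
 * PERCPU_RW_OPS() generates __percpu_read_<sz>()/__percpu_write_<sz>()
 * helpers. READ_ONCE()/WRITE_ONCE() give single-copy-atomic, tear-free
 * accesses; they impose no ordering beyond that.
 */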
#define PERCPU_RW_OPS(sz)						\
static inline unsigned long __percpu_read_##sz(void *ptr)		\
{									\
	return READ_ONCE(*(u##sz *)ptr);				\
}									\
									\
static inline void __percpu_write_##sz(void *ptr, unsigned long val)	\
{									\
	WRITE_ONCE(*(u##sz *)ptr, (u##sz)val);				\
}

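/*
 * __PERCPU_OP_CASE() generates __percpu_<name>_case_<sz>(): either an
 * LL/SC loop (ldxr/<op>/stxr/cbnz) or a single LSE store-form atomic,
 * selected at runtime by ARM64_LSE_ATOMIC_INSN. The __nops() padding
 * keeps both alternatives the same length so they can be patched in
 * place.
 */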
#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)		\
static inline void							\
__percpu_##name##_case_##sz(void *ptr, unsigned long val)		\
{									\
	unsigned int loop;						\
	u##sz tmp;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n"			\
		#op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %[ptr]\n"			\
		__nops(3))						\
	: [loop] "=&r" (loop), [tmp] "=&r" (tmp),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
}

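/*
 * __PERCPU_RET_OP_CASE() is the value-returning variant. The LSE path
 * uses the load-form atomic to fetch the old value, then reapplies the
 * operation to compute the new value in [ret], matching what the LL/SC
 * loop returns.
 */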
#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)		\
static inline u##sz							\
__percpu_##name##_return_case_##sz(void *ptr, unsigned long val)	\
{									\
	unsigned int loop;						\
	u##sz ret;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[ret], %[ptr]\n"			\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n"	\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
		__nops(2))						\
	: [loop] "=&r" (loop), [ret] "=&r" (ret),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
									\
	return ret;							\
}

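/*
 * Instantiate the cases above for 8/16/32/64-bit accesses: 'w' selects
 * the 32-bit W register names (an empty argument leaves the default
 * 64-bit X registers), while 'b'/'h' are the byte/halfword suffixes for
 * the exclusive load/store instructions.
 */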
#define PERCPU_OP(name, op_llsc, op_lse)				\
	__PERCPU_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_OP_CASE( ,  , name, 64, op_llsc, op_lse)

#define PERCPU_RET_OP(name, op_llsc, op_lse)				\
	__PERCPU_RET_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE( ,  , name, 64, op_llsc, op_lse)

PERCPU_RW_OPS(8)
PERCPU_RW_OPS(16)
PERCPU_RW_OPS(32)
PERCPU_RW_OPS(64)
PERCPU_OP(add, add, stadd)
PERCPU_OP(andnot, bic, stclr)
PERCPU_OP(or, orr, stset)
PERCPU_RET_OP(add, add, ldadd)

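/*
 * For illustration (not generated verbatim): PERCPU_OP(add, add, stadd)
 * above yields __percpu_add_case_32(), whose inline asm is roughly
 *
 *	// LL/SC alternative:			// LSE alternative:
 *	1: ldxr	%w[tmp], %[ptr]			stadd	%w[val], %[ptr]
 *	   add	%w[tmp], %w[tmp], %w[val]	nop; nop; nop
 *	   stxr	%w[loop], %w[tmp], %[ptr]
 *	   cbnz	%w[loop], 1b
 */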
#undef PERCPU_RW_OPS
#undef __PERCPU_OP_CASE
#undef __PERCPU_RET_OP_CASE
#undef PERCPU_OP
#undef PERCPU_RET_OP

/*
 * It would be nice to avoid the conditional call into the scheduler when
 * re-enabling preemption for preemptible kernels, but doing that in a way
 * which builds inside a module would mean messing directly with the preempt
 * count. If you do this, peterz and tglx will hunt you down.
 */
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable_notrace();					\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable_notrace();					\
	__ret;								\
})

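/*
 * _pcp_protect() and _pcp_protect_return() wrap an operation in
 * preempt_disable_notrace()/preempt_enable_notrace() so that the
 * raw_cpu_ptr() dereference and the operation itself happen on the same
 * CPU; the _return variant casts the result back to typeof(pcp).
 */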
#define _pcp_protect(op, pcp, ...)					\
({									\
	preempt_disable_notrace();					\
	op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);				\
	preempt_enable_notrace();					\
})

#define _pcp_protect_return(op, pcp, args...)				\
({									\
	typeof(pcp) __retval;						\
	preempt_disable_notrace();					\
	__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);	\
	preempt_enable_notrace();					\
	__retval;							\
})

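/*
 * The size-suffixed this_cpu_*_{1,2,4,8} hooks below (suffixes are byte
 * sizes) plug the helpers generated above into the generic this_cpu_*()
 * machinery pulled in at the end of this file.
 */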
#define this_cpu_read_1(pcp)		\
	_pcp_protect_return(__percpu_read_8, pcp)
#define this_cpu_read_2(pcp)		\
	_pcp_protect_return(__percpu_read_16, pcp)
#define this_cpu_read_4(pcp)		\
	_pcp_protect_return(__percpu_read_32, pcp)
#define this_cpu_read_8(pcp)		\
	_pcp_protect_return(__percpu_read_64, pcp)

#define this_cpu_write_1(pcp, val)	\
	_pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
#define this_cpu_write_2(pcp, val)	\
	_pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
#define this_cpu_write_4(pcp, val)	\
	_pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
#define this_cpu_write_8(pcp, val)	\
	_pcp_protect(__percpu_write_64, pcp, (unsigned long)val)

#define this_cpu_add_1(pcp, val)	\
	_pcp_protect(__percpu_add_case_8, pcp, val)
#define this_cpu_add_2(pcp, val)	\
	_pcp_protect(__percpu_add_case_16, pcp, val)
#define this_cpu_add_4(pcp, val)	\
	_pcp_protect(__percpu_add_case_32, pcp, val)
#define this_cpu_add_8(pcp, val)	\
	_pcp_protect(__percpu_add_case_64, pcp, val)

#define this_cpu_add_return_1(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_8, pcp, val)
#define this_cpu_add_return_2(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_16, pcp, val)
#define this_cpu_add_return_4(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_32, pcp, val)
#define this_cpu_add_return_8(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_64, pcp, val)

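/*
 * AND is implemented as "and-not of the complemented mask" so that the
 * LSE path can use stclr (atomic bit clear): x & val == x & ~(~val).
 */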
#define this_cpu_and_1(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_8, pcp, ~val)
#define this_cpu_and_2(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_16, pcp, ~val)
#define this_cpu_and_4(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_32, pcp, ~val)
#define this_cpu_and_8(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_64, pcp, ~val)

#define this_cpu_or_1(pcp, val)		\
	_pcp_protect(__percpu_or_case_8, pcp, val)
#define this_cpu_or_2(pcp, val)		\
	_pcp_protect(__percpu_or_case_16, pcp, val)
#define this_cpu_or_4(pcp, val)		\
	_pcp_protect(__percpu_or_case_32, pcp, val)
#define this_cpu_or_8(pcp, val)		\
	_pcp_protect(__percpu_or_case_64, pcp, val)

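/*
 * xchg and cmpxchg reuse the generic relaxed primitives from
 * <asm/cmpxchg.h>; disabling preemption is all that is needed to make
 * them safe as this_cpu operations.
 */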
#define this_cpu_xchg_1(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_2(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_4(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_8(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)

#define this_cpu_cmpxchg_1(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_2(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_4(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_8(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)

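/*
 * Example usage (illustrative only; "foo_events" is not a real kernel
 * symbol):
 *
 *	DEFINE_PER_CPU(unsigned long, foo_events);
 *	...
 *	this_cpu_add(foo_events, 1);
 *
 * The generic layer included below routes this to this_cpu_add_8(),
 * i.e. __percpu_add_case_64() with preemption disabled around it.
 */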
#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */