xref: /openbmc/linux/arch/arm64/include/asm/percpu.h (revision afb46f79)
/*
 * Copyright (C) 2013 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#ifdef CONFIG_SMP

/*
 * Record this CPU's per-CPU offset in the TPIDR_EL1 system register,
 * where __my_cpu_offset() below can read it back cheaply.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
}
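/*
 * Illustrative only (not part of this header): the arch SMP bring-up
 * code is expected to seed this register for each CPU early on, along
 * the lines of:
 *
 *	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 *
 * The exact call site is an assumption here, not something this file
 * defines.
 */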

/*
 * Read the per-CPU offset back from TPIDR_EL1.
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;
	register unsigned long *sp asm ("sp");

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));

	return off;
}
/*
 * Defining the macro lets <asm-generic/percpu.h> pick up this
 * implementation in place of its default offset lookup.
 */
#define __my_cpu_offset __my_cpu_offset()
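/*
 * Illustrative sketch (an assumption about typical codegen, not a
 * guarantee): because the mrs above is a non-volatile asm with a fake
 * dependency on *sp, the compiler may cache its result until something
 * clobbers memory:
 *
 *	unsigned long a = __my_cpu_offset;	// mrs emitted
 *	unsigned long b = __my_cpu_offset;	// may reuse a's value
 *	barrier();				// "memory" clobber
 *	unsigned long c = __my_cpu_offset;	// re-reads tpidr_el1
 */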

#else	/* !CONFIG_SMP */

#define set_my_cpu_offset(x)	do { } while (0)

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */
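For context, a minimal sketch (not kernel code, and not the exact macros from <asm-generic/percpu.h>) of how an offset like the one kept in TPIDR_EL1 is consumed: a per-CPU accessor adds the current CPU's offset to a variable's canonical address to reach that CPU's private copy. The names percpu_areas, current_cpu, my_cpu_offset() and this_cpu_counter() below are hypothetical stand-ins chosen for the illustration.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		2
#define PERCPU_INTS	16

/* One block of per-CPU storage per CPU; CPU 0's block doubles as the
 * "canonical" copy that per-CPU variable addresses refer to. */
static int percpu_areas[NR_CPUS][PERCPU_INTS];

/* Stand-in for the TPIDR_EL1 read: which CPU we pretend to run on. */
static int current_cpu;

/* Byte offset from the canonical area to the current CPU's area;
 * plays the role of __my_cpu_offset(). */
static uintptr_t my_cpu_offset(void)
{
	return (uintptr_t)percpu_areas[current_cpu] -
	       (uintptr_t)percpu_areas[0];
}

/* A "per-CPU variable" living at the start of the canonical area. */
static int *counter = &percpu_areas[0][0];

/* Model of a this_cpu_ptr()-style accessor: canonical address plus the
 * current CPU's offset. */
static int *this_cpu_counter(void)
{
	return (int *)((uintptr_t)counter + my_cpu_offset());
}

int main(void)
{
	current_cpu = 0;
	*this_cpu_counter() = 10;	/* CPU 0's copy */

	current_cpu = 1;
	*this_cpu_counter() = 20;	/* CPU 1's copy */

	printf("cpu0=%d cpu1=%d\n", percpu_areas[0][0], percpu_areas[1][0]);
	return 0;
}

In the kernel the offset would come from TPIDR_EL1 via __my_cpu_offset() above and the arithmetic is done by the generic per-CPU macros, but the shape of the calculation is the same.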