/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Calxeda, Inc.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

#include <asm/insn.h>

register unsigned long current_stack_pointer asm ("sp");

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW register, which only exists on V6K and V7.
 */
#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
	extern unsigned int smp_on_up;

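	/*
	 * A CONFIG_CPU_V6 kernel may be booted on plain V6 hardware that
	 * lacks TPIDRPRW (the register only appears in V6K). In that case
	 * the SMP_ON_UP boot-time fixup leaves smp_on_up at 0 and the mcr
	 * below must be skipped; the per-CPU offset is then only reachable
	 * through __per_cpu_offset[].
	 */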
	if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
		return;

	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
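
/*
 * Usage sketch (illustrative, not part of this header): boot code must
 * seed TPIDRPRW once per CPU before any per-CPU accessor runs, along
 * the lines of:
 *
 *	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 *
 * (at the time of writing this happens in cpu_init()).
 */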

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
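	/*
	 * On CONFIG_CPU_V6 the mrc at 0: may run on a CPU without
	 * TPIDRPRW. The .alt.smp.init record below tells the SMP_ON_UP
	 * fixup to patch 0: into a branch to 2:, an out-of-line slot in
	 * .subsection 1 that loads __per_cpu_offset[0] from memory
	 * instead and then branches back to 1:.
	 */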
	asm("0:	mrc p15, 0, %0, c13, c0, 4			\n\t"
#ifdef CONFIG_CPU_V6
	    "1:							\n\t"
	    "	.subsection 1					\n\t"
	    "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) "	\n\t"
	    "	b	1b					\n\t"
	    "	.previous					\n\t"
	    "	.pushsection \".alt.smp.init\", \"a\"		\n\t"
	    "	.long	0b - .					\n\t"
	    "	b	. + (2b - 0b)				\n\t"
	    "	.popsection					\n\t"
#endif
	     : "=r" (off)
	     : "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
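
/*
 * The "Q" input above is why the asm is not volatile: the fake stack
 * read gives barrier() (a "memory" clobber) something to hazard
 * against, while still letting the compiler cache the result between
 * barriers. A hypothetical caller, for illustration:
 *
 *	unsigned long a = __my_cpu_offset;	// one mrc emitted
 *	unsigned long b = __my_cpu_offset;	// may be CSEd with 'a'
 *	barrier();
 *	unsigned long c = __my_cpu_offset;	// forced to re-read
 */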
#else
#define set_my_cpu_offset(x)	do { } while (0)

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>
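
/*
 * With __my_cpu_offset wired up, the generic layer resolves per-CPU
 * accesses as "address of the variable plus TPIDRPRW". Roughly, for a
 * hypothetical variable (the real expansion lives in the generic
 * headers):
 *
 *	DEFINE_PER_CPU(int, counter);
 *	...
 *	int v = raw_cpu_read(counter);	// loads *(&counter + offset)
 */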

#endif /* _ASM_ARM_PERCPU_H_ */