/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Calxeda, Inc.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

/*
 * Same as asm-generic/percpu.h, except that we store the per-cpu offset
 * in the TPIDRPRW register. TPIDRPRW only exists on V6K and V7.
 */
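/*
 * Illustrative sketch (not part of this header; the names below come from
 * the generic per-cpu code): accessors built on asm-generic/percpu.h add
 * __my_cpu_offset to the address of a per-cpu variable, roughly:
 *
 *	DEFINE_PER_CPU(int, foo);
 *	int *p = (int *)((unsigned long)&foo + __my_cpu_offset);
 *
 * so reading TPIDRPRW here stands in for the generic lookup of
 * __per_cpu_offset[raw_smp_processor_id()].
 */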
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
static inline void set_my_cpu_offset(unsigned long off)
{
	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
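
/*
 * Usage sketch (an assumption based on the generic per-cpu API, not taken
 * from this header): each CPU's boot path is expected to seed TPIDRPRW
 * with that CPU's offset before per-cpu data is used, along the lines of:
 *
 *	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 *
 * where per_cpu_offset() comes from the generic per-cpu headers.
 */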

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
		: "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
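
/*
 * Sketch of the hazard described above (illustrative, not from this file):
 * with a non-volatile asm and no memory operand, the compiler may cache
 * the result across barrier(), and therefore across preempt_enable(), e.g.:
 *
 *	unsigned long a = __my_cpu_offset;	// may be kept in a register
 *	barrier();				// "memory" clobber
 *	unsigned long b = __my_cpu_offset;	// must be re-read here
 *
 * The dummy "Q" input referencing the stack makes the asm look like a
 * memory read, so it is ordered against barrier()'s memory clobber while
 * still being cacheable between barriers; an asm volatile would instead
 * force a re-read at every use.
 */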
#else
#define set_my_cpu_offset(x)	do { } while (0)

#endif /* CONFIG_SMP && !CONFIG_CPU_V6 */
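
/*
 * When the override above is not available (UP, or SMP kernels that also
 * support V6), the generic header below is expected to provide
 * __my_cpu_offset itself; on SMP that falls back to indexing the
 * __per_cpu_offset[] array with the current CPU number.
 */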

#include <asm-generic/percpu.h>

#endif /* _ASM_ARM_PERCPU_H_ */