19952f691SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
214318efbSRob Herring /*
314318efbSRob Herring * Copyright 2012 Calxeda, Inc.
414318efbSRob Herring */
514318efbSRob Herring #ifndef _ASM_ARM_PERCPU_H_
614318efbSRob Herring #define _ASM_ARM_PERCPU_H_
714318efbSRob Herring
87b9896c3SArd Biesheuvel #include <asm/insn.h>
97b9896c3SArd Biesheuvel
/*
 * Alias the stack pointer as a C-visible variable so that
 * __my_cpu_offset() below can perform a fake read of the stack
 * (a compiler-visible dependency used to hazard against barrier()).
 */
register unsigned long current_stack_pointer asm ("sp");
1214318efbSRob Herring /*
1314318efbSRob Herring * Same as asm-generic/percpu.h, except that we store the per cpu offset
1414318efbSRob Herring * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
1514318efbSRob Herring */
167b9896c3SArd Biesheuvel #ifdef CONFIG_SMP
/*
 * Record this CPU's per-cpu offset in TPIDRPRW so __my_cpu_offset()
 * can read it back with a single mrc instruction.
 *
 * On a CONFIG_CPU_V6 kernel actually running on pre-V6K hardware
 * (smp_on_up == 0), TPIDRPRW does not exist, so the write is skipped;
 * __my_cpu_offset() falls back to loading __per_cpu_offset from memory
 * in that configuration instead.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	extern unsigned int smp_on_up;

	/* No TPIDRPRW on plain V6 UP hardware — nothing to store into. */
	if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
		return;

	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
2714318efbSRob Herring
/*
 * Return this CPU's per-cpu offset, normally by reading TPIDRPRW
 * (stored there by set_my_cpu_offset() above).
 *
 * On CONFIG_CPU_V6 builds the asm also carries an out-of-line
 * fallback (in .subsection 1) for pre-V6K hardware that lacks
 * TPIDRPRW: it loads the offset from __per_cpu_offset in memory.
 * The ".alt.smp.init" entry records the location of the mrc so the
 * boot-time SMP_ON_UP fixup can patch it into a branch to that
 * fallback (label 2) when the kernel finds itself on UP hardware.
 * NOTE(review): the fixup mechanism lives in the kernel's head.S /
 * alternatives code, outside this file.
 */
static __always_inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("0:	mrc p15, 0, %0, c13, c0, 4			\n\t"
#ifdef CONFIG_CPU_V6
	"1:							\n\t"
	"	.subsection 1					\n\t"
/*
 * Group relocations let the fallback materialize the address of
 * __per_cpu_offset with mov/orr immediates; without them (or when
 * building a module that may go through a PLT, where group relocs
 * can't reach), fall back to a literal-pool load instead.
 */
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
    !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	"2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) "	\n\t"
	"	b	1b					\n\t"
#else
	"2:	ldr	%0, 3f					\n\t"
	"	ldr	%0, [%0]				\n\t"
	"	b	1b					\n\t"
	"3:	.long	__per_cpu_offset			\n\t"
#endif
	"	.previous					\n\t"
	/* Fixup record: offset of the mrc, and the branch to patch in. */
	"	.pushsection \".alt.smp.init\", \"a\"		\n\t"
	"	.long	0b - .					\n\t"
	"	b	. + (2b - 0b)				\n\t"
	"	.popsection					\n\t"
#endif
	 : "=r" (off)
	 : "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
#else
/* UP build: the generic percpu code needs no per-cpu offset register. */
#define set_my_cpu_offset(x)	do {} while(0)

#endif /* CONFIG_SMP */
6714318efbSRob Herring
6814318efbSRob Herring #include <asm-generic/percpu.h>
6914318efbSRob Herring
7014318efbSRob Herring #endif /* _ASM_ARM_PERCPU_H_ */
71