/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Calxeda, Inc.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

#include <asm/insn.h>

/* Named-register variable: always reads the current stack pointer (sp). */
register unsigned long current_stack_pointer asm ("sp");

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
 */
#ifdef CONFIG_SMP
/*
 * Record this CPU's per-cpu area offset by writing it to the TPIDRPRW
 * CP15 register (c13, c0, 4), from which __my_cpu_offset() reads it back.
 *
 * @off: per-cpu offset for the calling CPU.
 *
 * The "memory" clobber keeps the compiler from reordering per-cpu
 * accesses across the register update.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	extern unsigned int smp_on_up;

	/*
	 * On a kernel built to also run on plain V6 (which lacks
	 * TPIDRPRW, per the comment above), skip the write unless
	 * smp_on_up indicates we are on hardware that has the register.
	 */
	if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
		return;

	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

/*
 * Return the per-cpu offset previously stored by set_my_cpu_offset().
 *
 * On CONFIG_CPU_V6 builds an out-of-line alternative (label 2:) loads the
 * offset from the __per_cpu_offset table via LOAD_SYM_ARMV6 instead of
 * reading TPIDRPRW. The .alt.smp.init entry records the address of the
 * mrc (label 0:) and a branch to that alternative; NOTE(review): this
 * appears to be consumed by the kernel's SMP_ON_UP boot-time patching so
 * the mrc is replaced on CPUs without TPIDRPRW — confirm against the
 * arch/arm fixup code.
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
#ifdef CONFIG_CPU_V6
	"1: \n\t"
	" .subsection 1 \n\t"
	"2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
	" b 1b \n\t"
	" .previous \n\t"
	" .pushsection \".alt.smp.init\", \"a\" \n\t"
	" .long 0b - . \n\t"
	" b . + (2b - 0b) \n\t"
	" .popsection \n\t"
#endif
	: "=r" (off)
	/*
	 * Fake dependency on a stack word: lets barrier() (a "memory"
	 * clobber) invalidate a cached copy of the offset without making
	 * every read volatile.
	 */
	: "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
#else
/* UP build: there is no per-cpu offset to record; make this a no-op. */
#define set_my_cpu_offset(x) do {} while(0)

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>

#endif /* _ASM_ARM_PERCPU_H_ */