--- percpu.h (4b4193256c8d3bc3a5397b5cd9494c2ad386317d)
+++ percpu.h (7b9896c352073156a325c3bb0dc4c46e06e2a468)
@@ -1,19 +1,26 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright 2012 Calxeda, Inc.
  */
 #ifndef _ASM_ARM_PERCPU_H_
 #define _ASM_ARM_PERCPU_H_
 
+#include <asm/insn.h>
+
 register unsigned long current_stack_pointer asm ("sp");
 
 /*
  * Same as asm-generic/percpu.h, except that we store the per cpu offset
  * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
  */
-#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+#ifdef CONFIG_SMP
 static inline void set_my_cpu_offset(unsigned long off)
 {
+        extern unsigned int smp_on_up;
+
+        if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
+                return;
+
         /* Set TPIDRPRW */
         asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
 }
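The hunk above relaxes the old compile-time rule (SMP builds excluded plain ARMv6 entirely) into a runtime one: a CONFIG_CPU_V6 kernel may or may not find TPIDRPRW on the CPU it boots on, so the write is simply skipped until boot code has confirmed an SMP-capable (V6K/V7) core. A minimal user-space sketch of that gating pattern; IS_ENABLED() and smp_on_up are modeled with plain stand-ins, so treat the names and values as illustrative only:

#include <stdio.h>

#define CONFIG_CPU_V6   1       /* build that must also boot on plain V6 */
#define IS_ENABLED(opt) (opt)   /* stand-in for the kernel macro */

/* In the kernel this word is patched at boot; here it is a plain flag
 * that becomes nonzero once the CPU is known to have TPIDRPRW. */
static unsigned int smp_on_up;

static void set_my_cpu_offset(unsigned long off)
{
        /* With CONFIG_CPU_V6 compiled out, this branch folds away. */
        if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
                return;

        printf("mcr p15, 0, %#lx, c13, c0, 4\n", off);  /* the TPIDRPRW write */
}

int main(void)
{
        set_my_cpu_offset(0x1000);      /* skipped: no TPIDRPRW yet */
        smp_on_up = 1;                  /* "boot" detects a V6K/V7 core */
        set_my_cpu_offset(0x1000);      /* now performs the write */
        return 0;
}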
@@ -20,24 +27,36 @@
 
 static inline unsigned long __my_cpu_offset(void)
 {
         unsigned long off;
 
         /*
          * Read TPIDRPRW.
          * We want to allow caching the value, so avoid using volatile and
          * instead use a fake stack read to hazard against barrier().
          */
-        asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
-                : "Q" (*(const unsigned long *)current_stack_pointer));
+        asm("0: mrc p15, 0, %0, c13, c0, 4                      \n\t"
+#ifdef CONFIG_CPU_V6
+            "1:                                                 \n\t"
+            "   .subsection 1                                   \n\t"
+            "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) "        \n\t"
+            "   b       1b                                      \n\t"
+            "   .previous                                       \n\t"
+            "   .pushsection \".alt.smp.init\", \"a\"           \n\t"
+            "   .long   0b - .                                  \n\t"
+            "   b       . + (2b - 0b)                           \n\t"
+            "   .popsection                                     \n\t"
+#endif
+             : "=r" (off)
+             : "Q" (*(const unsigned long *)current_stack_pointer));
 
         return off;
 }
 #define __my_cpu_offset __my_cpu_offset()
 #else
 #define set_my_cpu_offset(x)    do {} while(0)
 
 #endif /* CONFIG_SMP */
 
 #include <asm-generic/percpu.h>
 
 #endif /* _ASM_ARM_PERCPU_H_ */
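The read side in the hunk above leans on the kernel's SMP_ON_UP alternatives. The mrc at local label 0 remains the fast path; when CONFIG_CPU_V6 is enabled, a fallback is emitted out of line at label 2, which loads the CPU 0 offset from __per_cpu_offset in memory via LOAD_SYM_ARMV6() and branches back to label 1. The ".alt.smp.init" entry records the patch site (".long 0b - .") plus a pre-encoded branch ("b . + (2b - 0b)"); since ARM branches are PC-relative, copying that word over the mrc makes it land on label 2. A hedged C sketch of how such a fixup table could be walked at boot; the real pass is early-boot assembly, and the struct layout and names below are illustrative, not the kernel's:

#include <stdint.h>

struct alt_smp_init_entry {
        int32_t  site_offset;   /* ".long 0b - .": patch site, relative
                                 * to this table entry */
        uint32_t up_insn;       /* "b . + (2b - 0b)": branch, pre-encoded
                                 * to reach label 2 once placed at label 0 */
};

/* Run once at boot, only when the CPU turns out to lack TPIDRPRW:
 * overwrite each SMP-only instruction (the mrc) with its UP fallback. */
void fixup_smp_on_up(struct alt_smp_init_entry *begin,
                     struct alt_smp_init_entry *end)
{
        for (struct alt_smp_init_entry *e = begin; e < end; e++) {
                uint32_t *site = (uint32_t *)((char *)e + e->site_offset);

                *site = e->up_insn;     /* real code must also sync caches */
        }
}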
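One subtlety the hunk keeps from the old code: the asm is deliberately not volatile, so the compiler may cache the offset across code where the task cannot migrate, while the "Q" input, a fake read of the word at the stack pointer, ties the asm to memory so that barrier()'s memory clobber forces a fresh mrc. A user-space illustration of the same pattern, assuming a 32-bit ARM toolchain and using TPIDRURO, the user-readable sibling of TPIDRPRW:

/* Build with a 32-bit ARM compiler, e.g. arm-linux-gnueabihf-gcc. */
register unsigned long current_stack_pointer asm ("sp");

#define barrier()       asm volatile("" ::: "memory")

static inline unsigned long read_tls_reg(void)
{
        unsigned long v;

        /* Not volatile: back-to-back calls may be folded into one mrc,
         * but the fake stack read below makes the asm "read memory". */
        asm("mrc p15, 0, %0, c13, c0, 3"        /* TPIDRURO */
            : "=r" (v)
            : "Q" (*(const unsigned long *)current_stack_pointer));
        return v;
}

int main(void)
{
        unsigned long a = read_tls_reg();       /* one mrc ... */
        unsigned long b = read_tls_reg();       /* ... may be CSE'd with a */

        barrier();                              /* memory clobber ... */
        unsigned long c = read_tls_reg();       /* ... forces a re-read */

        return (a == b && b == c) ? 0 : 1;
}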