/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Calxeda, Inc.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

/*
 * Give C code a name for the stack pointer register. Used below as a
 * fake memory operand so the compiler orders the TPIDRPRW read against
 * barrier() without making the asm volatile.
 */
register unsigned long current_stack_pointer asm ("sp");

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
/* Record this CPU's per-cpu offset in TPIDRPRW (cp15 c13, opcode 4). */
static inline void set_my_cpu_offset(unsigned long off)
{
	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

/* Return the per-cpu offset previously stored by set_my_cpu_offset(). */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
		: "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
#else
/* UP or ARMv6 (no TPIDRPRW): offset is always 0, nothing to record. */
#define set_my_cpu_offset(x) do {} while(0)

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>

#endif /* _ASM_ARM_PERCPU_H_ */