#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#include <linux/compiler.h>

/*
 * Same as asm-generic/percpu.h, except that we store the per-cpu
 * offset in the PDA.  Longer term, the PDA and every per-cpu variable
 * should just be put into a single section and referenced directly
 * from %gs.
 */

#ifdef CONFIG_SMP
#include <asm/pda.h>

#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
#define __my_cpu_offset read_pda(data_offset)

#define per_cpu_offset(x) (__per_cpu_offset(x))

#endif /* CONFIG_SMP */
#include <asm-generic/percpu.h>

DECLARE_PER_CPU(struct x8664_pda, pda);

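/*
 * Illustrative sketch (not part of this header): with the SMP
 * definitions above, the generic accessors from asm-generic/percpu.h
 * resolve the address of a given CPU's copy by adding that CPU's
 * PDA data_offset, roughly:
 *
 *	per_cpu(var, cpu)
 *		~> *(&per_cpu__var + cpu_pda(cpu)->data_offset)
 *
 * while __get_cpu_var() uses read_pda(data_offset) for the local CPU.
 */
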
/*
 * These are supposed to be implemented as a single instruction which
 * operates on the per-cpu data base segment.  x86-64 doesn't have
 * that yet, so this is a fairly inefficient workaround for the
 * meantime.  The single instruction would be atomic with respect to
 * preemption and interrupts; to approximate that we would have to
 * disable interrupts here.  However, because these accessors can be
 * used from within interrupt-disable/enable pairs, we can't actually
 * disable interrupts; disabling preemption is enough.
 */
#define x86_read_percpu(var)						\
	({								\
		typeof(per_cpu_var(var)) __tmp;				\
		preempt_disable();					\
		__tmp = __get_cpu_var(var);				\
		preempt_enable();					\
		__tmp;							\
	})

#define x86_write_percpu(var, val)					\
	do {								\
		preempt_disable();					\
		__get_cpu_var(var) = (val);				\
		preempt_enable();					\
	} while (0)

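/*
 * Usage sketch (illustrative; "hypo_count" is a hypothetical per-cpu
 * variable, not one defined by the kernel):
 *
 *	DEFINE_PER_CPU(unsigned long, hypo_count);
 *
 *	void hypo_bump(void)
 *	{
 *		x86_write_percpu(hypo_count, x86_read_percpu(hypo_count) + 1);
 *	}
 *
 * Note the read and the write disable preemption separately, so the
 * read-modify-write as a whole is *not* atomic against preemption;
 * callers needing that must disable preemption around the sequence.
 */
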
#else /* CONFIG_X86_64 */

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)				\
	movl %fs:per_cpu__##this_cpu_off, reg;		\
	lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)	%fs:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)			\
	movl $per_cpu__##var, reg
#define PER_CPU_VAR(var)	per_cpu__##var
#endif	/* SMP */

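/*
 * For illustration, the example above expands on SMP to:
 *
 *	movl %fs:per_cpu__this_cpu_off, %ebx
 *	lea per_cpu__cpu_gdt_descr(%ebx), %ebx
 *
 * i.e. fetch this CPU's per-cpu offset through %fs, then form the
 * address of this CPU's copy of the variable.
 */
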
#else /* ...!ASSEMBLY */

#ifdef CONFIG_SMP

#define __my_cpu_offset x86_read_percpu(this_cpu_off)

/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
#define __percpu_seg "%%fs:"

#else  /* !SMP */

#define __percpu_seg ""

#endif	/* SMP */

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) T__;			\
	if (0) {	/* type-check val against var; emits no code */ \
		T__ tmp__;				\
		tmp__ = (val);				\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_seg"%0"		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_seg"%0"		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_seg"%0"		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	default: __bad_percpu_size();	/* link-time error if reached */ \
	}						\
} while (0)

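/*
 * For illustration, with a hypothetical 4-byte per-cpu variable
 * "hypo_count", percpu_to_op("add", per_cpu__hypo_count, 1) selects
 * the "case 4" arm and emits a single instruction on SMP:
 *
 *	addl $1, %fs:per_cpu__hypo_count
 *
 * which is atomic with respect to preemption on the local CPU.
 */
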
#define percpu_from_op(op, var)				\
({							\
	typeof(var) ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_seg"%1,%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_seg"%1,%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_seg"%1,%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	ret__;						\
})

#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
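
/*
 * Usage sketch (illustrative; "hypo_count" is a hypothetical per-cpu
 * variable):
 *
 *	DEFINE_PER_CPU(unsigned long, hypo_count);
 *
 *	void hypo_event(void)
 *	{
 *		x86_add_percpu(hypo_count, 1);
 *	}
 *
 * Unlike the x86-64 read-then-write pattern shown earlier, the add is
 * one %fs-relative instruction, so no preempt_disable() is needed
 * around it.
 */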
#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_X86_64 */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu
 * areas are allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	(early_per_cpu_ptr(_name) ?				\
		early_per_cpu_ptr(_name)[_cpu] :		\
		per_cpu(_name, _cpu))

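/*
 * Usage sketch (illustrative; modeled on how early per-cpu data such
 * as the cpu-to-APIC-id map is typically handled, names hypothetical):
 *
 *	DEFINE_EARLY_PER_CPU(u16, hypo_cpu_to_apicid, BAD_APICID);
 *
 * Readers use early_per_cpu(), which transparently falls back to the
 * __initdata array until the real per-cpu areas exist:
 *
 *	u16 apicid = early_per_cpu(hypo_cpu_to_apicid, cpu);
 *
 * Once the per-cpu areas are set up, setup code copies the early map
 * into them and clears hypo_cpu_to_apicid_early_ptr, after which
 * early_per_cpu() reads the real per-cpu copy.
 */
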
#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */