#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%P" #x
#endif

/*
 * Initialized pointers to per-cpu variables needed by the boot
 * processor must use these macros to obtain the proper address:
 * on SMP the address is offset from __per_cpu_load.
 *
 * There must also be a matching entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
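
/*
 * Illustrative sketch (not part of the original interface; the
 * variable name is hypothetical): for an 'int' per-cpu variable on
 * an x86-64 SMP build, percpu_to_op("mov", myvar, 5) selects the
 * 4-byte case and emits a single segment-relative store, roughly:
 *
 *	movl $5, %gs:myvar
 *
 * The dead "if (0)" block above never generates code; it only makes
 * the compiler type-check that 'val' is assignable to typeof(var).
 */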

/*
 * Generate a per-cpu add-to-memory instruction, optimizing the
 * constant cases: adding 1 or -1 is emitted as inc/dec instead
 * of add.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(val) : 0;				\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
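
/*
 * Illustrative sketch (hypothetical per-cpu int 'nr_events'): the
 * pao_ID__ check is resolved at compile time, so the two constant
 * cases pick the shorter inc/dec encoding:
 *
 *	percpu_add_op(nr_events, 1);	->	incl %gs:nr_events
 *	percpu_add_op(nr_events, -1);	->	decl %gs:nr_events
 *	percpu_add_op(nr_events, 7);	->	addl $7, %gs:nr_events
 *
 * (x86-64 SMP shown; 32-bit builds use the %fs segment instead.)
 */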

#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})

/*
 * percpu_read() makes gcc load the per-cpu variable every time it is
 * accessed, while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if the value
 * is guaranteed to be valid across cpus.  The current users are
 * get_current() and get_thread_info(), both of which return per-thread
 * values that are implemented as per-cpu variables and are therefore
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_add_op(var, val)
#define percpu_sub(var, val)		percpu_add_op(var, -(val))
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
#define percpu_inc(var)			percpu_unary_op("inc", var)

#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
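
/*
 * Usage sketch (hypothetical per-cpu counter, not from the original
 * header): each accessor compiles to one segment-prefixed insn,
 * roughly:
 *
 *	DEFINE_PER_CPU(int, hits);
 *	...
 *	percpu_add(hits, 1);	// incl %gs:hits
 *	n = percpu_read(hits);	// movl %gs:hits, %eax
 *
 * Because each op is a single instruction, it cannot be torn by an
 * interrupt on the local CPU, which is why the irqsafe_* variants
 * above map to exactly the same code as the this_cpu_* ones.
 */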
percpu_to_op("or", (pcp), val) 283 #define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) 284 #define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) 285 #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) 286 #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) 287 #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) 288 289 /* 290 * Per cpu atomic 64 bit operations are only available under 64 bit. 291 * 32 bit must fall back to generic operations. 292 */ 293 #ifdef CONFIG_X86_64 294 #define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 295 #define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) 296 #define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) 297 #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) 298 #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 299 #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 300 301 #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 302 #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) 303 #define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) 304 #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) 305 #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 306 #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 307 308 #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) 309 #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) 310 #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 311 #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 312 313 #endif 314 315 /* This is not atomic against other CPUs -- CPU preemption needs to be off */ 316 #define x86_test_and_clear_bit_percpu(bit, var) \ 317 ({ \ 318 int old__; \ 319 asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ 320 : "=r" (old__), "+m" (var) \ 321 : "dIr" (bit)); \ 322 old__; \ 323 }) 324 325 #include <asm-generic/percpu.h> 326 327 /* We can use this directly for local CPU (faster). */ 328 DECLARE_PER_CPU(unsigned long, this_cpu_off); 329 330 #endif /* !__ASSEMBLY__ */ 331 332 #ifdef CONFIG_SMP 333 334 /* 335 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu 336 * variables that are initialized and accessed before there are per_cpu 337 * areas allocated. 338 */ 339 340 #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ 341 DEFINE_PER_CPU(_type, _name) = _initvalue; \ 342 __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ 343 { [0 ... NR_CPUS-1] = _initvalue }; \ 344 __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map 345 346 #define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ 347 EXPORT_PER_CPU_SYMBOL(_name) 348 349 #define DECLARE_EARLY_PER_CPU(_type, _name) \ 350 DECLARE_PER_CPU(_type, _name); \ 351 extern __typeof__(_type) *_name##_early_ptr; \ 352 extern __typeof__(_type) _name##_early_map[] 353 354 #define early_per_cpu_ptr(_name) (_name##_early_ptr) 355 #define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) 356 #define early_per_cpu(_name, _cpu) \ 357 *(early_per_cpu_ptr(_name) ? 
\ 358 &early_per_cpu_ptr(_name)[_cpu] : \ 359 &per_cpu(_name, _cpu)) 360 361 #else /* !CONFIG_SMP */ 362 #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ 363 DEFINE_PER_CPU(_type, _name) = _initvalue 364 365 #define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ 366 EXPORT_PER_CPU_SYMBOL(_name) 367 368 #define DECLARE_EARLY_PER_CPU(_type, _name) \ 369 DECLARE_PER_CPU(_type, _name) 370 371 #define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) 372 #define early_per_cpu_ptr(_name) NULL 373 /* no early_per_cpu_map() */ 374 375 #endif /* !CONFIG_SMP */ 376 377 #endif /* _ASM_X86_PERCPU_H */ 378