/*
 * Local APIC (and x2APIC) interface definitions for x86: register
 * accessors, the per-system "struct apic" driver vtable, and the
 * default implementations of its hooks.
 */
#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H

#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/pm.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/system.h>
#include <asm/msr.h>

#define ARCH_APICTIMER_STOPS_ON_C3	1

/*
 * Debugging macros
 */
#define APIC_QUIET	0
#define APIC_VERBOSE	1
#define APIC_DEBUG	2

/*
 * Define the default level of output to be very little
 * This can be turned up by using apic=verbose for more
 * information and apic=debug for _lots_ of information.
 * apic_verbosity is defined in apic.c
 */
#define apic_printk(v, s, a...) do {       \
		if ((v) <= apic_verbosity) \
			printk(s, ##a);    \
	} while (0)


#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void generic_apic_probe(void);
#else
static inline void generic_apic_probe(void)
{
}
#endif

#ifdef CONFIG_X86_LOCAL_APIC

extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;

extern int disable_apic;

#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */

/* Query a remote APIC, but only when verbose APIC debugging is enabled. */
static inline void default_inquire_remote_apic(int apicid)
{
	if (apic_verbosity >= APIC_DEBUG)
		__inquire_remote_apic(apicid);
}

/*
 * With 82489DX we can't rely on apic feature bit
 * retrieved via cpuid but still have to deal with
 * such an apic chip so we assume that SMP configuration
 * is found from MP table (64bit case uses ACPI mostly
 * which set smp presence flag as well so we are safe
 * to use this helper too).
 */
static inline bool apic_from_smp_config(void)
{
	return smp_found_config && !disable_apic;
}

/*
 * Basic functions accessing APICs.
 */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define setup_boot_clock	setup_boot_APIC_clock
#define setup_secondary_clock	setup_secondary_APIC_clock
#endif

#ifdef CONFIG_X86_64
extern int is_vsmp_box(void);
#else
static inline int is_vsmp_box(void)
{
	return 0;
}
#endif
extern void xapic_wait_icr_idle(void);
extern u32 safe_xapic_wait_icr_idle(void);
extern void xapic_icr_write(u32, u32);
extern int setup_profiling_timer(unsigned int);

/*
 * MMIO write of an xAPIC register.  The alternative_io() swaps the
 * plain movl for xchgl on CPUs with the X86_FEATURE_11AP erratum
 * (Pentium "11AP" APIC bug - presumably the locked access is the
 * workaround; confirm against the erratum documentation).
 */
static inline void native_apic_mem_write(u32 reg, u32 v)
{
	volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);

	alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
		       ASM_OUTPUT2("0" (v), "m" (*addr)));
}

/* MMIO read of an xAPIC register at its fixmapped base. */
static inline u32 native_apic_mem_read(u32 reg)
{
	return *((volatile u32 *)(APIC_BASE + reg));
}

extern void native_apic_wait_icr_idle(void);
extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);

extern int x2apic_mode;

#ifdef CONFIG_X86_X2APIC
/*
 * Make previous memory operations globally visible before
 * sending the IPI through x2apic wrmsr. We need a serializing instruction or
 * mfence for this.
 */
static inline void x2apic_wrmsr_fence(void)
{
	asm volatile("mfence" : : : "memory");
}

/*
 * MSR write of an x2APIC register.  "reg >> 4" converts the 0x10-spaced
 * MMIO register offset into the corresponding x2APIC MSR index.
 * Writes to DFR, ID, LDR and LVR are silently dropped: those registers
 * are not writable (or not present) in x2apic mode.
 */
static inline void native_apic_msr_write(u32 reg, u32 v)
{
	if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
	    reg == APIC_LVR)
		return;

	wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
}

/*
 * MSR read of an x2APIC register; returns the low 32 bits.
 * DFR does not exist in x2apic mode, so reads of it yield all-ones.
 */
static inline u32 native_apic_msr_read(u32 reg)
{
	u32 low, high;

	if (reg == APIC_DFR)
		return -1;

	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
	return low;
}

static inline void native_x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
	return;
}

static inline u32 native_safe_x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
	return 0;
}

/* In x2apic mode the ICR is a single 64-bit MSR: destination id in the
 * high half, command in the low half. */
static inline void native_x2apic_icr_write(u32 low, u32 id)
{
	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}

static inline u64 native_x2apic_icr_read(void)
{
	unsigned long val;

	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
	return val;
}

extern int x2apic_phys;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);

/*
 * Returns 1 when the CPU supports x2apic and the X2APIC_ENABLE bit is
 * set in the IA32_APICBASE MSR, 0 otherwise.
 */
static inline int x2apic_enabled(void)
{
	int msr, msr2;

	if (!cpu_has_x2apic)
		return 0;

	rdmsr(MSR_IA32_APICBASE, msr, msr2);
	if (msr & X2APIC_ENABLE)
		return 1;
	return 0;
}

#define x2apic_supported()	(cpu_has_x2apic)

/* Force physical destination mode for x2apic IPIs. */
static inline void x2apic_force_phys(void)
{
	x2apic_phys = 1;
}
#else
/* !CONFIG_X86_X2APIC: stubs so callers need no ifdefs of their own. */
static inline void check_x2apic(void)
{
}
static inline void enable_x2apic(void)
{
}
static inline int x2apic_enabled(void)
{
	return 0;
}
static inline void x2apic_force_phys(void)
{
}

#define x2apic_preenabled	0
#define x2apic_supported()	0
#endif

extern void enable_IR_x2apic(void);

extern int get_physical_broadcast(void);

extern void apic_disable(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern int verify_local_APIC(void);
extern void cache_APIC_registers(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
extern void end_local_APIC_setup(void);
extern void init_apic_mappings(void);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern int APIC_init_uniprocessor(void);
extern void enable_NMI_through_LVT0(void);

/*
 * On 32bit this is mach-xxx local
 */
#ifdef CONFIG_X86_64
extern void early_init_lapic_mapping(void);
extern int apic_is_clustered_box(void);
#else
static inline int apic_is_clustered_box(void)
{
	return 0;
}
#endif

extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);


#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok		1
static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { }
static inline void apic_disable(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_64
#define SET_APIC_ID(x)		(apic->set_apic_id(x))
#else

#endif

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC sub-arch data struct.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
struct apic {
	/* Driver name, e.g. for boot-time reporting. */
	char *name;

	/* Probe/identification hooks: */
	int (*probe)(void);
	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
	int (*apic_id_registered)(void);

	u32 irq_delivery_mode;
	u32 irq_dest_mode;

	const struct cpumask *(*target_cpus)(void);

	int disable_esr;

	int dest_logical;
	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
	unsigned long (*check_apicid_present)(int apicid);

	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
	void (*init_apic_ldr)(void);

	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);

	/* Topology / apicid translation hooks: */
	void (*setup_apic_routing)(void);
	int (*multi_timer_check)(int apic, int irq);
	int (*apicid_to_node)(int logical_apicid);
	int (*cpu_to_logical_apicid)(int cpu);
	int (*cpu_present_to_apicid)(int mps_cpu);
	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
	void (*setup_portio_remap)(void);
	int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
	void (*enable_apic_mode)(void);
	int (*phys_pkg_id)(int cpuid_apic, int index_msb);

	/*
	 * When one of the next two hooks returns 1 the apic
	 * is switched to this. Essentially they are additional
	 * probe functions:
	 */
	int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);

	unsigned int (*get_apic_id)(unsigned long x);
	unsigned long (*set_apic_id)(unsigned int id);
	unsigned long apic_id_mask;

	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
					       const struct cpumask *andmask);

	/* ipi */
	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
					 int vector);
	void (*send_IPI_allbutself)(int vector);
	void (*send_IPI_all)(int vector);
	void (*send_IPI_self)(int vector);

	/* wakeup_secondary_cpu */
	int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);

	int trampoline_phys_low;
	int trampoline_phys_high;

	void (*wait_for_init_deassert)(atomic_t *deassert);
	void (*smp_callin_clear_local_apic)(void);
	void (*inquire_remote_apic)(int apicid);

	/* apic ops */
	u32 (*read)(u32 reg);
	void (*write)(u32 reg, u32 v);
	u64 (*icr_read)(void);
	void (*icr_write)(u32 low, u32 high);
	void (*wait_icr_idle)(void);
	u32 (*safe_wait_icr_idle)(void);
};

/*
 * Pointer to the local APIC driver in use on this system (there's
 * always just one such driver in use - the kernel decides via an
 * early probing process which one it picks - and then sticks to it):
 */
extern struct apic *apic;

/*
 * APIC functionality to boot other CPUs - only used on SMP:
 */
#ifdef CONFIG_SMP
extern atomic_t init_deasserted;
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif

/* Thin wrappers dispatching through the active driver's apic ops: */
static inline u32 apic_read(u32 reg)
{
	return apic->read(reg);
}

static inline void apic_write(u32 reg, u32 val)
{
	apic->write(reg, val);
}

static inline u64 apic_icr_read(void)
{
	return apic->icr_read();
}

static inline void apic_icr_write(u32 low, u32 high)
{
	apic->icr_write(low, high);
}

static inline void apic_wait_icr_idle(void)
{
	apic->wait_icr_idle();
}

static inline u32 safe_apic_wait_icr_idle(void)
{
	return apic->safe_wait_icr_idle();
}


static inline void ack_APIC_irq(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * ack_APIC_irq() actually gets compiled as a single instruction
	 * ... yummie.
	 */

	/* Docs say use 0 for future compatibility */
	apic_write(APIC_EOI, 0);
#endif
}

/*
 * Extract the APIC id from an APIC_ID register value: bits 31-24,
 * masked to 4 bits on pre-xAPIC parts without the extended-apicid
 * feature (their id field is only 4 bits wide).
 */
static inline unsigned default_get_apic_id(unsigned long x)
{
	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));

	if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
		return (x >> 24) & 0xFF;
	else
		return (x >> 24) & 0x0F;
}

/*
 * Warm reset vector default position:
 */
#define DEFAULT_TRAMPOLINE_PHYS_LOW		0x467
#define DEFAULT_TRAMPOLINE_PHYS_HIGH		0x469

#ifdef CONFIG_X86_64
extern struct apic apic_flat;
extern struct apic apic_physflat;
extern struct apic apic_x2apic_cluster;
extern struct apic apic_x2apic_phys;
extern int default_acpi_madt_oem_check(char *, char *);

extern void apic_send_IPI_self(int vector);

extern struct apic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);

extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif

/* Spin until the INIT deassert flag is raised by the boot CPU. */
static inline void default_wait_for_init_deassert(atomic_t *deassert)
{
	while (!atomic_read(deassert))
		cpu_relax();
	return;
}

extern void generic_bigsmp_probe(void);


#ifdef CONFIG_X86_LOCAL_APIC

#include <asm/smp.h>

#define APIC_DFR_VALUE	(APIC_DFR_FLAT)

static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);


/* Read this CPU's APIC id via the driver's accessor and decoder. */
static inline unsigned int read_apic_id(void)
{
	unsigned int reg;

	reg = apic_read(APIC_ID);

	return apic->get_apic_id(reg);
}

extern void default_setup_apic_routing(void);

#ifdef CONFIG_X86_32

extern struct apic apic_default;

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
extern void default_init_apic_ldr(void);

static inline int default_apic_id_registered(void)
{
	return physid_isset(read_apic_id(), phys_cpu_present_map);
}

static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

extern int default_apicid_to_node(int logical_apicid);

#endif

/*
 * Note: only the first BITS_PER_LONG cpus are covered here - only
 * word 0 of the cpumask is consulted.
 */
static inline unsigned int
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}

/* Intersection of both masks with the online map, again word 0 only. */
static inline unsigned int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			       const struct cpumask *andmask)
{
	unsigned long mask1 = cpumask_bits(cpumask)[0];
	unsigned long mask2 = cpumask_bits(andmask)[0];
	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];

	return (unsigned int)(mask1 & mask2 & mask3);
}

static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return physid_isset(apicid, bitmap);
}

static inline unsigned long default_check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}

/* Identity mapping: IO-APIC phys ids are the CPU phys ids. */
static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
{
	return phys_map;
}

/* Mapping from cpu number to logical apicid */
static inline int default_cpu_to_logical_apicid(int cpu)
{
	return 1 << cpu;
}

/*
 * Translate an MP-table CPU number to its BIOS-reported APIC id,
 * or BAD_APICID when the CPU is out of range or not present.
 */
static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
	else
		return BAD_APICID;
}

static inline int
__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}

#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

static inline int
default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
}
#else
/* 64-bit: out of line (definitions live in apic.c). */
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif

static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
{
	return physid_mask_of_physid(phys_apicid);
}

#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_32
extern u8 cpu_2_logical_apicid[NR_CPUS];
#endif

#endif /* _ASM_X86_APIC_H */