xref: /openbmc/linux/arch/x86/kernel/apic/apic.c (revision 483eb062)
1 /*
2  *	Local APIC handling, local APIC timers
3  *
4  *	(c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
5  *
6  *	Fixes
7  *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
8  *					thanks to Eric Gilmore
9  *					and Rolf G. Tews
10  *					for testing these extensively.
11  *	Maciej W. Rozycki	:	Various updates and fixes.
12  *	Mikael Pettersson	:	Power Management for UP-APIC.
13  *	Pavel Machek and
14  *	Mikael Pettersson	:	PM converted to driver model.
15  */
16 
17 #include <linux/perf_event.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/mc146818rtc.h>
20 #include <linux/acpi_pmtmr.h>
21 #include <linux/clockchips.h>
22 #include <linux/interrupt.h>
23 #include <linux/bootmem.h>
24 #include <linux/ftrace.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/delay.h>
29 #include <linux/timex.h>
30 #include <linux/i8253.h>
31 #include <linux/dmar.h>
32 #include <linux/init.h>
33 #include <linux/cpu.h>
34 #include <linux/dmi.h>
35 #include <linux/smp.h>
36 #include <linux/mm.h>
37 
38 #include <asm/trace/irq_vectors.h>
39 #include <asm/irq_remapping.h>
40 #include <asm/perf_event.h>
41 #include <asm/x86_init.h>
42 #include <asm/pgalloc.h>
43 #include <linux/atomic.h>
44 #include <asm/mpspec.h>
45 #include <asm/i8259.h>
46 #include <asm/proto.h>
47 #include <asm/apic.h>
48 #include <asm/io_apic.h>
49 #include <asm/desc.h>
50 #include <asm/hpet.h>
51 #include <asm/idle.h>
52 #include <asm/mtrr.h>
53 #include <asm/time.h>
54 #include <asm/smp.h>
55 #include <asm/mce.h>
56 #include <asm/tsc.h>
57 #include <asm/hypervisor.h>
58 
59 unsigned int num_processors;
60 
61 unsigned disabled_cpus;
62 
63 /* Processor that is doing the boot up */
64 unsigned int boot_cpu_physical_apicid = -1U;
65 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
66 
67 /*
68  * The highest APIC ID seen during enumeration.
69  */
70 unsigned int max_physical_apicid;
71 
72 /*
73  * Bitmask of physically existing CPUs:
74  */
75 physid_mask_t phys_cpu_present_map;
76 
77 /*
78  * Processor to be disabled specified by kernel parameter
79  * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
80  * avoid undefined behaviour caused by sending INIT from AP to BSP.
81  */
82 static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
83 
84 /*
85  * Map cpu index to physical APIC ID
86  */
87 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
88 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
89 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
90 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
91 
92 #ifdef CONFIG_X86_32
93 
94 /*
95  * On x86_32, the mapping between cpu and logical apicid may vary
96  * depending on apic in use.  The following early percpu variable is
97  * used for the mapping.  This is where the behaviors of x86_64 and 32
98  * actually diverge.  Let's keep it ugly for now.
99  */
100 DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
101 
102 /* Local APIC was disabled by the BIOS and enabled by the kernel */
103 static int enabled_via_apicbase;
104 
105 /*
106  * Handle interrupt mode configuration register (IMCR).
107  * This register controls whether the interrupt signals
108  * that reach the BSP come from the master PIC or from the
109  * local APIC. Before entering Symmetric I/O Mode, either
110  * the BIOS or the operating system must switch out of
111  * PIC Mode by changing the IMCR.
112  */
static inline void imcr_pic_to_apic(void)
{
	/* select IMCR register (write 0x70 to index port 0x22) */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go through APIC (data port 0x23) */
	outb(0x01, 0x23);
}
120 
static inline void imcr_apic_to_pic(void)
{
	/* select IMCR register (write 0x70 to index port 0x22) */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go directly to BSP (PIC mode) */
	outb(0x00, 0x23);
}
128 #endif
129 
130 /*
131  * Knob to control our willingness to enable the local APIC.
132  *
133  * +1=force-enable
134  */
135 static int force_enable_local_apic __initdata;
/*
 * APIC command line parameters:
 *   "lapic"               (32-bit, no argument) - force-enable the local APIC
 *   "lapic=notscdeadline" - do not use the TSC-deadline timer mode
 */
static int __init parse_lapic(char *arg)
{
	/* A bare "lapic" only makes sense on 32-bit kernels */
	if (config_enabled(CONFIG_X86_32) && !arg)
		force_enable_local_apic = 1;
	else if (arg && !strncmp(arg, "notscdeadline", 13))
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	return 0;
}
early_param("lapic", parse_lapic);
148 
149 #ifdef CONFIG_X86_64
150 static int apic_calibrate_pmtmr __initdata;
151 static __init int setup_apicpmtimer(char *s)
152 {
153 	apic_calibrate_pmtmr = 1;
154 	notsc_setup(NULL);
155 	return 0;
156 }
157 __setup("apicpmtimer", setup_apicpmtimer);
158 #endif
159 
160 int x2apic_mode;
161 #ifdef CONFIG_X86_X2APIC
162 /* x2apic enabled before OS handover */
163 int x2apic_preenabled;
164 static int x2apic_disabled;
165 static int nox2apic;
/*
 * "nox2apic" - keep the APIC in legacy xAPIC mode.  If the BIOS already
 * switched to x2apic, remember to disable it later; that is only possible
 * while every APIC ID still fits into the 8-bit xAPIC ID space.
 */
static __init int setup_nox2apic(char *str)
{
	if (x2apic_enabled()) {
		int apicid = native_apic_msr_read(APIC_ID);

		/* IDs >= 255 are unaddressable in xAPIC mode */
		if (apicid >= 255) {
			pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
				   apicid);
			return 0;
		}

		pr_warning("x2apic already enabled. will disable it\n");
	} else
		setup_clear_cpu_cap(X86_FEATURE_X2APIC);

	nox2apic = 1;

	return 0;
}
early_param("nox2apic", setup_nox2apic);
186 #endif
187 
188 unsigned long mp_lapic_addr;
189 int disable_apic;
190 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
191 static int disable_apic_timer __initdata;
192 /* Local APIC timer works in C2 */
193 int local_apic_timer_c2_ok;
194 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
195 
196 int first_system_vector = 0xfe;
197 
198 /*
199  * Debug level, exported for io_apic.c
200  */
201 unsigned int apic_verbosity;
202 
203 int pic_mode;
204 
205 /* Have we found an MP table */
206 int smp_found_config;
207 
208 static struct resource lapic_resource = {
209 	.name = "Local APIC",
210 	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
211 };
212 
213 unsigned int lapic_timer_frequency = 0;
214 
215 static void apic_pm_activate(void);
216 
217 static unsigned long apic_phys;
218 
/*
 * Get the LAPIC version (low byte of the local APIC version register)
 */
static inline int lapic_get_version(void)
{
	return GET_APIC_VERSION(apic_read(APIC_LVR));
}
226 
/*
 * Check, if the APIC is integrated or a separate chip (82489DX)
 */
static inline int lapic_is_integrated(void)
{
#ifdef CONFIG_X86_64
	/* All 64-bit capable CPUs have an integrated APIC */
	return 1;
#else
	/* Discrete 82489DX APICs report a version below 0x10 */
	return APIC_INTEGRATED(lapic_get_version());
#endif
}
238 
/*
 * Check, whether this is a modern or a first generation APIC
 */
static int modern_apic(void)
{
	/* AMD systems use old APIC versions, so check the CPU */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;
	/* Everything else: version 0x14 and up counts as modern */
	return lapic_get_version() >= 0x14;
}
250 
/*
 * Switch to the no-op APIC driver: right after this call apic becomes
 * NOOP driven, so apic->write()/read() don't do anything.
 */
static void __init apic_disable(void)
{
	pr_info("APIC: switched to apic NOOP\n");
	apic = &apic_noop;
}
260 
/* Spin until a previously sent IPI has been accepted (ICR busy bit clears) */
void native_apic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
266 
267 u32 native_safe_apic_wait_icr_idle(void)
268 {
269 	u32 send_status;
270 	int timeout;
271 
272 	timeout = 0;
273 	do {
274 		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
275 		if (!send_status)
276 			break;
277 		inc_irq_stat(icr_read_retry_count);
278 		udelay(100);
279 	} while (timeout++ < 1000);
280 
281 	return send_status;
282 }
283 
/*
 * Write the ICR in xAPIC mode.  The destination must go into ICR2
 * first; the write of the low word to ICR is what triggers the IPI.
 */
void native_apic_icr_write(u32 low, u32 id)
{
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
	apic_write(APIC_ICR, low);
}
289 
290 u64 native_apic_icr_read(void)
291 {
292 	u32 icr1, icr2;
293 
294 	icr2 = apic_read(APIC_ICR2);
295 	icr1 = apic_read(APIC_ICR);
296 
297 	return icr1 | ((u64)icr2 << 32);
298 }
299 
300 #ifdef CONFIG_X86_32
/**
 * get_physical_broadcast - Get number of physical broadcast IDs
 *
 * Modern APICs use an 8-bit physical destination (0xff broadcast),
 * first generation ones only a 4-bit one (0xf broadcast).
 */
int get_physical_broadcast(void)
{
	if (modern_apic())
		return 0xff;
	return 0xf;
}
308 #endif
309 
310 /**
311  * lapic_get_maxlvt - get the maximum number of local vector table entries
312  */
313 int lapic_get_maxlvt(void)
314 {
315 	unsigned int v;
316 
317 	v = apic_read(APIC_LVR);
318 	/*
319 	 * - we always have APIC integrated on 64bit mode
320 	 * - 82489DXs do not report # of LVT entries
321 	 */
322 	return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
323 }
324 
325 /*
326  * Local APIC timer
327  */
328 
329 /* Clock divisor */
330 #define APIC_DIVISOR 16
331 #define TSC_DIVISOR  32
332 
333 /*
334  * This function sets up the local APIC timer, with a timeout of
335  * 'clocks' APIC bus clock. During calibration we actually call
336  * this function twice on the boot CPU, once with a bogus timeout
337  * value, second time for real. The other (noncalibrating) CPUs
338  * call this function only once, with the real, calibrated value.
339  *
340  * We do reads before writes even if unnecessary, to get around the
341  * P5 APIC double write bug.
342  */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	/* Build the LVT timer entry: vector plus mode bits */
	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;

	/* The discrete 82489DX needs an explicit timer base divider field */
	if (!lapic_is_integrated())
		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);

	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	/* TSC deadline mode uses neither the divider nor the initial count */
	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
		return;
	}

	/*
	 * Divide PICLK by 16 (matches APIC_DIVISOR)
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR,
		(tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
		APIC_TDR_DIV_16);

	/* Arm the counter; in oneshot mode lapic_next_event() does this */
	if (!oneshot)
		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}
377 
378 /*
379  * Setup extended LVT, AMD specific
380  *
381  * Software should use the LVT offsets the BIOS provides.  The offsets
382  * are determined by the subsystems using it like those for MCE
383  * threshold or IBS.  On K8 only offset 0 (APIC500) and MCE interrupts
384  * are supported. Beginning with family 10h at least 4 offsets are
385  * available.
386  *
387  * Since the offsets must be consistent for all cores, we keep track
388  * of the LVT offsets in software and reserve the offset for the same
389  * vector also to be used on other cores. An offset is freed by
390  * setting the entry to APIC_EILVT_MASKED.
391  *
392  * If the BIOS is right, there should be no conflicts. Otherwise a
393  * "[Firmware Bug]: ..." error message is generated. However, if
394  * software does not properly determines the offsets, it is not
395  * necessarily a BIOS bug.
396  */
397 
398 static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
399 
400 static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
401 {
402 	return (old & APIC_EILVT_MASKED)
403 		|| (new == APIC_EILVT_MASKED)
404 		|| ((new & ~APIC_EILVT_MASKED) == old);
405 }
406 
/*
 * Atomically reserve extended-LVT slot @offset for entry @new.
 * Returns @new on success, or the conflicting reservation value if the
 * slot is already taken by a different vector.
 */
static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
	unsigned int rsvd, vector;

	if (offset >= APIC_EILVT_NR_MAX)
		return ~0;

	rsvd = atomic_read(&eilvt_offsets[offset]);
	do {
		vector = rsvd & ~APIC_EILVT_MASKED;	/* 0: unassigned */
		if (vector && !eilvt_entry_is_changeable(vector, new))
			/* may not change if vectors are different */
			return rsvd;
		/* Retry until the cmpxchg succeeds without interference */
		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
	} while (rsvd != new);

	rsvd &= ~APIC_EILVT_MASKED;
	if (rsvd && rsvd != vector)
		pr_info("LVT offset %d assigned for vector 0x%02x\n",
			offset, rsvd);

	return new;
}
430 
431 /*
432  * If mask=1, the LVT entry does not generate interrupts while mask=0
433  * enables the vector. See also the BKDGs. Must be called with
434  * preemption disabled.
435  */
436 
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = APIC_EILVTn(offset);
	unsigned int new, old, reserved;

	/* Assemble the LVT entry: mask bit, message type, vector */
	new = (mask << 16) | (msg_type << 8) | vector;
	old = apic_read(reg);
	reserved = reserve_eilvt_offset(offset, new);

	/* Offset already reserved with a different vector on another cpu */
	if (reserved != new) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on another cpu\n",
		       smp_processor_id(), reg, offset, new, reserved);
		return -EINVAL;
	}

	/* The hardware register itself still carries a conflicting entry */
	if (!eilvt_entry_is_changeable(old, new)) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on this cpu\n",
		       smp_processor_id(), reg, offset, new, old);
		return -EBUSY;
	}

	apic_write(reg, new);

	return 0;
}
467 
/*
 * Program the next event, relative to now
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	/* Writing the initial count register (re)arms the one-shot timer */
	apic_write(APIC_TMICT, delta);
	return 0;
}
477 
/* Program the next event in TSC-deadline mode, relative to the current TSC */
static int lapic_next_deadline(unsigned long delta,
			       struct clock_event_device *evt)
{
	u64 tsc;

	rdtscll(tsc);
	/* delta was scaled down by TSC_DIVISOR at registration time */
	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
	return 0;
}
487 
/*
 * Setup the lapic timer in periodic or oneshot mode
 */
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return;

	local_irq_save(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		__setup_APIC_LVTT(lapic_timer_frequency,
				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Mask the LVT entry and stop the counter */
		v = apic_read(APIC_LVTT);
		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, v);
		apic_write(APIC_TMICT, 0);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}

	local_irq_restore(flags);
}
523 
/*
 * Local APIC timer broadcast function: deliver the timer vector to all
 * CPUs in @mask via an IPI (for CPUs whose own timer is stopped).
 */
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
533 
534 
/*
 * The local apic timer can be used for any function which is CPU local.
 * It starts out rated below PIT/HPET and flagged DUMMY until the
 * calibration code has verified that it actually works.
 */
static struct clock_event_device lapic_clockevent = {
	.name		= "lapic",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
			| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift		= 32,
	.set_mode	= lapic_timer_setup,
	.set_next_event	= lapic_next_event,
	.broadcast	= lapic_timer_broadcast,
	.rating		= 100,
	.irq		= -1,
};
/* Per-cpu copy of lapic_clockevent, registered by setup_APIC_timer() */
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
550 
/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	/* ARAT: the timer keeps ticking in deep C-states */
	if (this_cpu_has(X86_FEATURE_ARAT)) {
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
		/* Make LAPIC timer preferrable over percpu HPET */
		lapic_clockevent.rating = 150;
	}

	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of(smp_processor_id());

	/* TSC-deadline mode is one-shot only and needs no APIC calibration */
	if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
		levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
				    CLOCK_EVT_FEAT_DUMMY);
		levt->set_next_event = lapic_next_deadline;
		clockevents_config_and_register(levt,
						(tsc_khz / TSC_DIVISOR) * 1000,
						0xF, ~0UL);
	} else
		clockevents_register_device(levt);
}
578 
579 /*
580  * In this functions we calibrate APIC bus clocks to the external timer.
581  *
582  * We want to do the calibration only once since we want to have local timer
583  * irqs syncron. CPUs connected by the same APIC bus have the very same bus
584  * frequency.
585  *
586  * This was previously done by reading the PIT/HPET and waiting for a wrap
587  * around to find out, that a tick has elapsed. I have a box, where the PIT
588  * readout is broken, so it never gets out of the wait loop again. This was
589  * also reported by others.
590  *
591  * Monitoring the jiffies value is inaccurate and the clockevents
592  * infrastructure allows us to do a simple substitution of the interrupt
593  * handler.
594  *
595  * The calibration routine also uses the pm_timer when possible, as the PIT
596  * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
597  * back to normal later in the boot process).
598  */
599 
600 #define LAPIC_CAL_LOOPS		(HZ/10)
601 
602 static __initdata int lapic_cal_loops = -1;
603 static __initdata long lapic_cal_t1, lapic_cal_t2;
604 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
605 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
606 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
607 
/*
 * Temporary interrupt handler. Snapshots the LAPIC count, TSC, PM timer
 * and jiffies at the first and at the LAPIC_CAL_LOOPS'th tick of the
 * global clock event, so the deltas span a known wall-clock interval.
 */
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
	unsigned long long tsc = 0;
	long tapic = apic_read(APIC_TMCCT);
	unsigned long pm = acpi_pm_read_early();

	if (cpu_has_tsc)
		rdtscll(tsc);

	switch (lapic_cal_loops++) {
	case 0:
		lapic_cal_t1 = tapic;
		lapic_cal_tsc1 = tsc;
		lapic_cal_pm1 = pm;
		lapic_cal_j1 = jiffies;
		break;

	case LAPIC_CAL_LOOPS:
		lapic_cal_t2 = tapic;
		lapic_cal_tsc2 = tsc;
		/* The 24-bit PM timer may have wrapped once meanwhile */
		if (pm < lapic_cal_pm1)
			pm += ACPI_PM_OVRRUN;
		lapic_cal_pm2 = pm;
		lapic_cal_j2 = jiffies;
		break;
	}
}
638 
639 static int __init
640 calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
641 {
642 	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
643 	const long pm_thresh = pm_100ms / 100;
644 	unsigned long mult;
645 	u64 res;
646 
647 #ifndef CONFIG_X86_PM_TIMER
648 	return -1;
649 #endif
650 
651 	apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
652 
653 	/* Check, if the PM timer is available */
654 	if (!deltapm)
655 		return -1;
656 
657 	mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
658 
659 	if (deltapm > (pm_100ms - pm_thresh) &&
660 	    deltapm < (pm_100ms + pm_thresh)) {
661 		apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
662 		return 0;
663 	}
664 
665 	res = (((u64)deltapm) *  mult) >> 22;
666 	do_div(res, 1000000);
667 	pr_warning("APIC calibration not consistent "
668 		   "with PM-Timer: %ldms instead of 100ms\n",(long)res);
669 
670 	/* Correct the lapic counter value */
671 	res = (((u64)(*delta)) * pm_100ms);
672 	do_div(res, deltapm);
673 	pr_info("APIC delta adjusted to PM-Timer: "
674 		"%lu (%ld)\n", (unsigned long)res, *delta);
675 	*delta = (long)res;
676 
677 	/* Correct the tsc counter value */
678 	if (cpu_has_tsc) {
679 		res = (((u64)(*deltatsc)) * pm_100ms);
680 		do_div(res, deltapm);
681 		apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
682 					  "PM-Timer: %lu (%ld)\n",
683 					(unsigned long)res, *deltatsc);
684 		*deltatsc = (long)res;
685 	}
686 
687 	return 0;
688 }
689 
/*
 * Calibrate the local APIC timer against the global clock event
 * (PIT/HPET), optionally cross-checked by the PM timer, and fill in
 * lapic_clockevent.  Returns 0 on success, -1 when the timer is
 * unusable and must stay a dummy.
 */
static int __init calibrate_APIC_clock(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
	void (*real_handler)(struct clock_event_device *dev);
	unsigned long deltaj;
	long delta, deltatsc;
	int pm_referenced = 0;

	/**
	 * check if lapic timer has already been calibrated by platform
	 * specific routine, such as tsc calibration code. if so, we just fill
	 * in the clockevent structure and return.
	 */

	if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
		/* TSC-deadline mode needs no APIC bus calibration at all */
		return 0;
	} else if (lapic_timer_frequency) {
		apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
				lapic_timer_frequency);
		lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
					TICK_NSEC, lapic_clockevent.shift);
		lapic_clockevent.max_delta_ns =
			clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
		lapic_clockevent.min_delta_ns =
			clockevent_delta2ns(0xF, &lapic_clockevent);
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
		return 0;
	}

	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
		    "calibrating APIC timer ...\n");

	local_irq_disable();

	/* Replace the global interrupt handler */
	real_handler = global_clock_event->event_handler;
	global_clock_event->event_handler = lapic_cal_handler;

	/*
	 * Setup the APIC counter to maximum. There is no way the lapic
	 * can underflow in the 100ms detection time frame
	 */
	__setup_APIC_LVTT(0xffffffff, 0, 0);

	/* Let the interrupts run */
	local_irq_enable();

	/* Wait for LAPIC_CAL_LOOPS ticks (~100ms) of the global clockevent */
	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
		cpu_relax();

	local_irq_disable();

	/* Restore the real event handler */
	global_clock_event->event_handler = real_handler;

	/* Build delta t1-t2 as apic timer counts down */
	delta = lapic_cal_t1 - lapic_cal_t2;
	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);

	deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);

	/* we trust the PM based calibration if possible */
	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
					&delta, &deltatsc);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
				       lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;

	apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
	apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
	apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
		    lapic_timer_frequency);

	if (cpu_has_tsc) {
		apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
			    "%ld.%04ld MHz.\n",
			    (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
			    (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
	}

	apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
		    "%u.%04u MHz.\n",
		    lapic_timer_frequency / (1000000 / HZ),
		    lapic_timer_frequency % (1000000 / HZ));

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (lapic_timer_frequency < (1000000 / HZ)) {
		local_irq_enable();
		pr_warning("APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;

	/*
	 * PM timer calibration failed or not turned on
	 * so lets try APIC timer based calibration
	 */
	if (!pm_referenced) {
		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");

		/*
		 * Setup the apic timer manually
		 */
		levt->event_handler = lapic_cal_handler;
		lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
		lapic_cal_loops = -1;

		/* Let the interrupts run */
		local_irq_enable();

		while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
			cpu_relax();

		/* Stop the lapic timer */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);

		/* Jiffies delta */
		deltaj = lapic_cal_j2 - lapic_cal_j1;
		apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);

		/* Check, if the jiffies result is consistent */
		if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
		else
			levt->features |= CLOCK_EVT_FEAT_DUMMY;
	} else
		local_irq_enable();

	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
		pr_warning("APIC timer disabled due to verification failure\n");
			return -1;
	}

	return 0;
}
835 
/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel
	 * commandline or from the CPU detection code. Register the lapic
	 * timer as a dummy clock event source on SMP systems, so the
	 * broadcast mechanism is used. On UP systems simply ignore it.
	 */
	if (disable_apic_timer) {
		pr_info("Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	/* Calibration failed: register as dummy so broadcast takes over */
	if (calibrate_APIC_clock()) {
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going.  Otherwise register lapic as a dummy
	 * device.
	 */
	lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;

	/* Setup the lapic or request the broadcast */
	setup_APIC_timer();
}
876 
/* Register the (already calibrated) local APIC timer on a secondary CPU */
void setup_secondary_APIC_clock(void)
{
	setup_APIC_timer();
}
881 
/*
 * The guts of the apic timer interrupt
 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, its possible that there is a pending LAPIC
	 * timer interrupt from previous kernel's context and is delivered in
	 * new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * its possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	inc_irq_stat(apic_timer_irqs);

	/* Hand off to the clockevent handler (tick code) */
	evt->event_handler(evt);
}
915 
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 *
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	entering_ack_irq();
	local_apic_timer_interrupt();
	exiting_irq();

	set_irq_regs(old_regs);
}
942 
/* As smp_apic_timer_interrupt(), but wrapped in entry/exit tracepoints */
__visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 *
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	entering_ack_irq();
	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
	local_apic_timer_interrupt();
	trace_local_timer_exit(LOCAL_TIMER_VECTOR);
	exiting_irq();

	set_irq_regs(old_regs);
}
963 
/* Changing the profiling multiplier is not supported on x86 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
968 
969 /*
970  * Local APIC start and shutdown
971  */
972 
973 /**
974  * clear_local_APIC - shutdown the local APIC
975  *
976  * This is called, when a CPU is disabled and before rebooting, so the state of
977  * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
978  * leftovers during boot.
979  */
void clear_local_APIC(void)
{
	int maxlvt;
	u32 v;

	/* APIC hasn't been mapped yet */
	if (!x2apic_mode && !apic_phys)
		return;

	maxlvt = lapic_get_maxlvt();
	/*
	 * Masking an LVT entry can trigger a local APIC error
	 * if the vector is zero. Mask LVTERR first to prevent this.
	 */
	if (maxlvt >= 3) {
		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
		apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	}
	/*
	 * Careful: we have to set masks only first to deassert
	 * any level-triggered sources.
	 */
	v = apic_read(APIC_LVTT);
	apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT1);
	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
	/* Performance counter LVT, present from maxlvt 4 on */
	if (maxlvt >= 4) {
		v = apic_read(APIC_LVTPC);
		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
	}

	/* lets not touch this if we didn't frob it */
#ifdef CONFIG_X86_THERMAL_VECTOR
	if (maxlvt >= 5) {
		v = apic_read(APIC_LVTTHMR);
		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
	}
#endif
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 6) {
		v = apic_read(APIC_LVTCMCI);
		if (!(v & APIC_LVT_MASKED))
			apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
	}
#endif

	/*
	 * Clean APIC state for other OSs:
	 */
	apic_write(APIC_LVTT, APIC_LVT_MASKED);
	apic_write(APIC_LVT0, APIC_LVT_MASKED);
	apic_write(APIC_LVT1, APIC_LVT_MASKED);
	if (maxlvt >= 3)
		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, APIC_LVT_MASKED);

	/* Integrated APIC (!82489DX) ? */
	if (lapic_is_integrated()) {
		if (maxlvt > 3)
			/* Clear ESR due to Pentium errata 3AP and 11AP */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}
}
1047 
1048 /**
1049  * disable_local_APIC - clear and disable the local APIC
1050  */
void disable_local_APIC(void)
{
	unsigned int value;

	/* APIC hasn't been mapped yet */
	if (!x2apic_mode && !apic_phys)
		return;

	clear_local_APIC();

	/*
	 * Software-disable the APIC via the enable bit in the spurious
	 * interrupt vector register (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);

#ifdef CONFIG_X86_32
	/*
	 * When LAPIC was disabled by the BIOS and enabled by the kernel,
	 * restore the disabled state.
	 */
	if (enabled_via_apicbase) {
		unsigned int l, h;

		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_ENABLE;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}
#endif
}
1083 
1084 /*
1085  * If Linux enabled the LAPIC against the BIOS default disable it down before
1086  * re-entering the BIOS on shutdown.  Otherwise the BIOS may get confused and
1087  * not power-off.  Additionally clear all LVT entries before disable_local_APIC
1088  * for the case where Linux didn't enable the LAPIC.
1089  */
1090 void lapic_shutdown(void)
1091 {
1092 	unsigned long flags;
1093 
1094 	if (!cpu_has_apic && !apic_from_smp_config())
1095 		return;
1096 
1097 	local_irq_save(flags);
1098 
1099 #ifdef CONFIG_X86_32
1100 	if (!enabled_via_apicbase)
1101 		clear_local_APIC();
1102 	else
1103 #endif
1104 		disable_local_APIC();
1105 
1106 
1107 	local_irq_restore(flags);
1108 }
1109 
1110 /*
1111  * This is to verify that we're looking at a real local APIC.
1112  * Check these against your board if the CPUs aren't getting
1113  * started for no apparent reason.
1114  */
1115 int __init verify_local_APIC(void)
1116 {
1117 	unsigned int reg0, reg1;
1118 
1119 	/*
1120 	 * The version register is read-only in a real APIC.
1121 	 */
1122 	reg0 = apic_read(APIC_LVR);
1123 	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
1124 	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
1125 	reg1 = apic_read(APIC_LVR);
1126 	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
1127 
1128 	/*
1129 	 * The two version reads above should print the same
1130 	 * numbers.  If the second one is different, then we
1131 	 * poke at a non-APIC.
1132 	 */
1133 	if (reg1 != reg0)
1134 		return 0;
1135 
1136 	/*
1137 	 * Check if the version looks reasonably.
1138 	 */
1139 	reg1 = GET_APIC_VERSION(reg0);
1140 	if (reg1 == 0x00 || reg1 == 0xff)
1141 		return 0;
1142 	reg1 = lapic_get_maxlvt();
1143 	if (reg1 < 0x02 || reg1 == 0xff)
1144 		return 0;
1145 
1146 	/*
1147 	 * The ID register is read/write in a real APIC.
1148 	 */
1149 	reg0 = apic_read(APIC_ID);
1150 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
1151 	apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
1152 	reg1 = apic_read(APIC_ID);
1153 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
1154 	apic_write(APIC_ID, reg0);
1155 	if (reg1 != (reg0 ^ apic->apic_id_mask))
1156 		return 0;
1157 
1158 	/*
1159 	 * The next two are just to see if we have sane values.
1160 	 * They're only really relevant if we're in Virtual Wire
1161 	 * compatibility mode, but most boxes are anymore.
1162 	 */
1163 	reg0 = apic_read(APIC_LVT0);
1164 	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
1165 	reg1 = apic_read(APIC_LVT1);
1166 	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
1167 
1168 	return 1;
1169 }
1170 
1171 /**
1172  * sync_Arb_IDs - synchronize APIC bus arbitration IDs
1173  */
1174 void __init sync_Arb_IDs(void)
1175 {
1176 	/*
1177 	 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
1178 	 * needed on AMD.
1179 	 */
1180 	if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1181 		return;
1182 
1183 	/*
1184 	 * Wait for idle.
1185 	 */
1186 	apic_wait_icr_idle();
1187 
1188 	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
1189 	apic_write(APIC_ICR, APIC_DEST_ALLINC |
1190 			APIC_INT_LEVELTRIG | APIC_DM_INIT);
1191 }
1192 
1193 /*
1194  * An initial setup of the virtual wire mode.
1195  */
1196 void __init init_bsp_APIC(void)
1197 {
1198 	unsigned int value;
1199 
1200 	/*
1201 	 * Don't do the setup now if we have a SMP BIOS as the
1202 	 * through-I/O-APIC virtual wire mode might be active.
1203 	 */
1204 	if (smp_found_config || !cpu_has_apic)
1205 		return;
1206 
1207 	/*
1208 	 * Do not trust the local APIC being empty at bootup.
1209 	 */
1210 	clear_local_APIC();
1211 
1212 	/*
1213 	 * Enable APIC.
1214 	 */
1215 	value = apic_read(APIC_SPIV);
1216 	value &= ~APIC_VECTOR_MASK;
1217 	value |= APIC_SPIV_APIC_ENABLED;
1218 
1219 #ifdef CONFIG_X86_32
1220 	/* This bit is reserved on P4/Xeon and should be cleared */
1221 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1222 	    (boot_cpu_data.x86 == 15))
1223 		value &= ~APIC_SPIV_FOCUS_DISABLED;
1224 	else
1225 #endif
1226 		value |= APIC_SPIV_FOCUS_DISABLED;
1227 	value |= SPURIOUS_APIC_VECTOR;
1228 	apic_write(APIC_SPIV, value);
1229 
1230 	/*
1231 	 * Set up the virtual wire mode.
1232 	 */
1233 	apic_write(APIC_LVT0, APIC_DM_EXTINT);
1234 	value = APIC_DM_NMI;
1235 	if (!lapic_is_integrated())		/* 82489DX */
1236 		value |= APIC_LVT_LEVEL_TRIGGER;
1237 	apic_write(APIC_LVT1, value);
1238 }
1239 
/*
 * Enable error-interrupt reporting through the LVT error register,
 * unless the APIC has no ESR (82489DX) or the apic driver asked for
 * the ESR to stay disabled.
 */
static void lapic_setup_esr(void)
{
	unsigned int oldvalue, value, maxlvt;

	if (!lapic_is_integrated()) {
		pr_info("No ESR for 82489DX.\n");
		return;
	}

	if (apic->disable_esr) {
		/*
		 * Something untraceable is creating bad interrupts on
		 * secondary quads ... for the moment, just leave the
		 * ESR disabled - we can't do anything useful with the
		 * errors anyway - mbligh
		 */
		pr_info("Leaving ESR disabled.\n");
		return;
	}

	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
		apic_write(APIC_ESR, 0);
	oldvalue = apic_read(APIC_ESR);

	/* enables sending errors */
	value = ERROR_APIC_VECTOR;
	apic_write(APIC_LVTERR, value);

	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
	value = apic_read(APIC_ESR);
	if (value != oldvalue)
		apic_printk(APIC_VERBOSE, "ESR value before enabling "
			"vector: 0x%08x  after: 0x%08x\n",
			oldvalue, value);
}
1280 
1281 /**
1282  * setup_local_APIC - setup the local APIC
1283  *
1284  * Used to setup local APIC while initializing BSP or bringin up APs.
1285  * Always called with preemption disabled.
1286  */
1287 void setup_local_APIC(void)
1288 {
1289 	int cpu = smp_processor_id();
1290 	unsigned int value, queued;
1291 	int i, j, acked = 0;
1292 	unsigned long long tsc = 0, ntsc;
1293 	long long max_loops = cpu_khz;
1294 
1295 	if (cpu_has_tsc)
1296 		rdtscll(tsc);
1297 
1298 	if (disable_apic) {
1299 		disable_ioapic_support();
1300 		return;
1301 	}
1302 
1303 #ifdef CONFIG_X86_32
1304 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
1305 	if (lapic_is_integrated() && apic->disable_esr) {
1306 		apic_write(APIC_ESR, 0);
1307 		apic_write(APIC_ESR, 0);
1308 		apic_write(APIC_ESR, 0);
1309 		apic_write(APIC_ESR, 0);
1310 	}
1311 #endif
1312 	perf_events_lapic_init();
1313 
1314 	/*
1315 	 * Double-check whether this APIC is really registered.
1316 	 * This is meaningless in clustered apic mode, so we skip it.
1317 	 */
1318 	BUG_ON(!apic->apic_id_registered());
1319 
1320 	/*
1321 	 * Intel recommends to set DFR, LDR and TPR before enabling
1322 	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
1323 	 * document number 292116).  So here it goes...
1324 	 */
1325 	apic->init_apic_ldr();
1326 
1327 #ifdef CONFIG_X86_32
1328 	/*
1329 	 * APIC LDR is initialized.  If logical_apicid mapping was
1330 	 * initialized during get_smp_config(), make sure it matches the
1331 	 * actual value.
1332 	 */
1333 	i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
1334 	WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
1335 	/* always use the value from LDR */
1336 	early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
1337 		logical_smp_processor_id();
1338 
1339 	/*
1340 	 * Some NUMA implementations (NUMAQ) don't initialize apicid to
1341 	 * node mapping during NUMA init.  Now that logical apicid is
1342 	 * guaranteed to be known, give it another chance.  This is already
1343 	 * a bit too late - percpu allocation has already happened without
1344 	 * proper NUMA affinity.
1345 	 */
1346 	if (apic->x86_32_numa_cpu_node)
1347 		set_apicid_to_node(early_per_cpu(x86_cpu_to_apicid, cpu),
1348 				   apic->x86_32_numa_cpu_node(cpu));
1349 #endif
1350 
1351 	/*
1352 	 * Set Task Priority to 'accept all'. We never change this
1353 	 * later on.
1354 	 */
1355 	value = apic_read(APIC_TASKPRI);
1356 	value &= ~APIC_TPRI_MASK;
1357 	apic_write(APIC_TASKPRI, value);
1358 
1359 	/*
1360 	 * After a crash, we no longer service the interrupts and a pending
1361 	 * interrupt from previous kernel might still have ISR bit set.
1362 	 *
1363 	 * Most probably by now CPU has serviced that pending interrupt and
1364 	 * it might not have done the ack_APIC_irq() because it thought,
1365 	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
1366 	 * does not clear the ISR bit and cpu thinks it has already serivced
1367 	 * the interrupt. Hence a vector might get locked. It was noticed
1368 	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
1369 	 */
1370 	do {
1371 		queued = 0;
1372 		for (i = APIC_ISR_NR - 1; i >= 0; i--)
1373 			queued |= apic_read(APIC_IRR + i*0x10);
1374 
1375 		for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1376 			value = apic_read(APIC_ISR + i*0x10);
1377 			for (j = 31; j >= 0; j--) {
1378 				if (value & (1<<j)) {
1379 					ack_APIC_irq();
1380 					acked++;
1381 				}
1382 			}
1383 		}
1384 		if (acked > 256) {
1385 			printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
1386 			       acked);
1387 			break;
1388 		}
1389 		if (queued) {
1390 			if (cpu_has_tsc) {
1391 				rdtscll(ntsc);
1392 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
1393 			} else
1394 				max_loops--;
1395 		}
1396 	} while (queued && max_loops > 0);
1397 	WARN_ON(max_loops <= 0);
1398 
1399 	/*
1400 	 * Now that we are all set up, enable the APIC
1401 	 */
1402 	value = apic_read(APIC_SPIV);
1403 	value &= ~APIC_VECTOR_MASK;
1404 	/*
1405 	 * Enable APIC
1406 	 */
1407 	value |= APIC_SPIV_APIC_ENABLED;
1408 
1409 #ifdef CONFIG_X86_32
1410 	/*
1411 	 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
1412 	 * certain networking cards. If high frequency interrupts are
1413 	 * happening on a particular IOAPIC pin, plus the IOAPIC routing
1414 	 * entry is masked/unmasked at a high rate as well then sooner or
1415 	 * later IOAPIC line gets 'stuck', no more interrupts are received
1416 	 * from the device. If focus CPU is disabled then the hang goes
1417 	 * away, oh well :-(
1418 	 *
1419 	 * [ This bug can be reproduced easily with a level-triggered
1420 	 *   PCI Ne2000 networking cards and PII/PIII processors, dual
1421 	 *   BX chipset. ]
1422 	 */
1423 	/*
1424 	 * Actually disabling the focus CPU check just makes the hang less
1425 	 * frequent as it makes the interrupt distributon model be more
1426 	 * like LRU than MRU (the short-term load is more even across CPUs).
1427 	 * See also the comment in end_level_ioapic_irq().  --macro
1428 	 */
1429 
1430 	/*
1431 	 * - enable focus processor (bit==0)
1432 	 * - 64bit mode always use processor focus
1433 	 *   so no need to set it
1434 	 */
1435 	value &= ~APIC_SPIV_FOCUS_DISABLED;
1436 #endif
1437 
1438 	/*
1439 	 * Set spurious IRQ vector
1440 	 */
1441 	value |= SPURIOUS_APIC_VECTOR;
1442 	apic_write(APIC_SPIV, value);
1443 
1444 	/*
1445 	 * Set up LVT0, LVT1:
1446 	 *
1447 	 * set up through-local-APIC on the BP's LINT0. This is not
1448 	 * strictly necessary in pure symmetric-IO mode, but sometimes
1449 	 * we delegate interrupts to the 8259A.
1450 	 */
1451 	/*
1452 	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1453 	 */
1454 	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1455 	if (!cpu && (pic_mode || !value)) {
1456 		value = APIC_DM_EXTINT;
1457 		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1458 	} else {
1459 		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1460 		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
1461 	}
1462 	apic_write(APIC_LVT0, value);
1463 
1464 	/*
1465 	 * only the BP should see the LINT1 NMI signal, obviously.
1466 	 */
1467 	if (!cpu)
1468 		value = APIC_DM_NMI;
1469 	else
1470 		value = APIC_DM_NMI | APIC_LVT_MASKED;
1471 	if (!lapic_is_integrated())		/* 82489DX */
1472 		value |= APIC_LVT_LEVEL_TRIGGER;
1473 	apic_write(APIC_LVT1, value);
1474 
1475 #ifdef CONFIG_X86_MCE_INTEL
1476 	/* Recheck CMCI information after local APIC is up on CPU #0 */
1477 	if (!cpu)
1478 		cmci_recheck();
1479 #endif
1480 }
1481 
/*
 * Common tail of local APIC setup: enable ESR error reporting, mask the
 * local APIC timer on 32-bit, and activate APIC power management.
 */
void end_local_APIC_setup(void)
{
	lapic_setup_esr();

#ifdef CONFIG_X86_32
	{
		unsigned int value;
		/* Disable the local apic timer */
		value = apic_read(APIC_LVTT);
		value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, value);
	}
#endif

	apic_pm_activate();
}
1498 
1499 void __init bsp_end_local_APIC_setup(void)
1500 {
1501 	end_local_APIC_setup();
1502 
1503 	/*
1504 	 * Now that local APIC setup is completed for BP, configure the fault
1505 	 * handling for interrupt remapping.
1506 	 */
1507 	irq_remap_enable_fault_handling();
1508 
1509 }
1510 
1511 #ifdef CONFIG_X86_X2APIC
1512 /*
1513  * Need to disable xapic and x2apic at the same time and then enable xapic mode
1514  */
1515 static inline void __disable_x2apic(u64 msr)
1516 {
1517 	wrmsrl(MSR_IA32_APICBASE,
1518 	       msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1519 	wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1520 }
1521 
/*
 * Turn off a BIOS/firmware-enabled x2apic and fall back to xapic mode,
 * remapping the legacy MMIO APIC base afterwards.
 */
static __init void disable_x2apic(void)
{
	u64 msr;

	if (!cpu_has_x2apic)
		return;

	rdmsrl(MSR_IA32_APICBASE, msr);
	if (msr & X2APIC_ENABLE) {
		u32 x2apic_id = read_apic_id();

		/* APIC IDs >= 255 cannot be represented in xapic mode. */
		if (x2apic_id >= 255)
			panic("Cannot disable x2apic, id: %08x\n", x2apic_id);

		pr_info("Disabling x2apic\n");
		__disable_x2apic(msr);

		/* "nox2apic" on the command line: hide the feature entirely. */
		if (nox2apic) {
			clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC);
			setup_clear_cpu_cap(X86_FEATURE_X2APIC);
		}

		x2apic_disabled = 1;
		x2apic_mode = 0;

		/* Re-establish the MMIO mapping for legacy xapic mode. */
		register_lapic_address(mp_lapic_addr);
	}
}
1550 
1551 void check_x2apic(void)
1552 {
1553 	if (x2apic_enabled()) {
1554 		pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
1555 		x2apic_preenabled = x2apic_mode = 1;
1556 	}
1557 }
1558 
/*
 * Enable x2apic mode in the APIC_BASE MSR if requested; if x2apic was
 * explicitly disabled earlier, make sure it stays off on this CPU too.
 */
void enable_x2apic(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_APICBASE, msr);
	if (x2apic_disabled) {
		/* Keep APs consistent with the earlier global disable. */
		__disable_x2apic(msr);
		return;
	}

	if (!x2apic_mode)
		return;

	if (!(msr & X2APIC_ENABLE)) {
		printk_once(KERN_INFO "Enabling x2apic\n");
		wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
	}
}
1577 #endif /* CONFIG_X86_X2APIC */
1578 
/*
 * Try to enable interrupt remapping.
 *
 * Returns the irq_remapping_enable() result on success paths, -1 when
 * remapping is unsupported, skipped, or not compiled in (the trailing
 * "return -1" is only reachable without CONFIG_IRQ_REMAP).
 */
int __init enable_IR(void)
{
#ifdef CONFIG_IRQ_REMAP
	if (!irq_remapping_supported()) {
		pr_debug("intr-remapping not supported\n");
		return -1;
	}

	if (!x2apic_preenabled && skip_ioapic_setup) {
		pr_info("Skipped enabling intr-remap because of skipping "
			"io-apic setup\n");
		return -1;
	}

	return irq_remapping_enable();
#endif
	return -1;
}
1597 
/*
 * Enable interrupt remapping and, where possible, x2apic mode.  All of
 * this happens with interrupts off and both the PIC and IO-APIC masked,
 * with IO-APIC state saved so it can be restored if enabling fails.
 */
void __init enable_IR_x2apic(void)
{
	unsigned long flags;
	int ret, x2apic_enabled = 0;
	int hardware_init_ret;

	/* Make sure irq_remap_ops are initialized */
	setup_irq_remapping_ops();

	hardware_init_ret = irq_remapping_prepare();
	if (hardware_init_ret && !x2apic_supported())
		return;

	ret = save_ioapic_entries();
	if (ret) {
		pr_info("Saving IO-APIC state failed: %d\n", ret);
		return;
	}

	/* Quiesce all interrupt sources while we reconfigure. */
	local_irq_save(flags);
	legacy_pic->mask_all();
	mask_ioapic_entries();

	/* Honor "nox2apic" even if the BIOS pre-enabled x2apic. */
	if (x2apic_preenabled && nox2apic)
		disable_x2apic();

	if (hardware_init_ret)
		ret = -1;
	else
		ret = enable_IR();

	if (!x2apic_supported())
		goto skip_x2apic;

	if (ret < 0) {
		/* IR is required if there is APIC ID > 255 even when running
		 * under KVM
		 */
		if (max_physical_apicid > 255 ||
		    !hypervisor_x2apic_available()) {
			if (x2apic_preenabled)
				disable_x2apic();
			goto skip_x2apic;
		}
		/*
		 * without IR all CPUs can be addressed by IOAPIC/MSI
		 * only in physical mode
		 */
		x2apic_force_phys();
	}

	if (ret == IRQ_REMAP_XAPIC_MODE) {
		pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
		goto skip_x2apic;
	}

	x2apic_enabled = 1;

	if (x2apic_supported() && !x2apic_mode) {
		x2apic_mode = 1;
		enable_x2apic();
		pr_info("Enabled x2apic\n");
	}

skip_x2apic:
	if (ret < 0) /* IR enabling failed */
		restore_ioapic_entries();
	legacy_pic->restore_mask();
	local_irq_restore(flags);
}
1668 
1669 #ifdef CONFIG_X86_64
1670 /*
1671  * Detect and enable local APICs on non-SMP boards.
1672  * Original code written by Keir Fraser.
1673  * On AMD64 we trust the BIOS - if it says no APIC it is likely
1674  * not correctly set up (usually the APIC timer won't work etc.)
1675  */
1676 static int __init detect_init_APIC(void)
1677 {
1678 	if (!cpu_has_apic) {
1679 		pr_info("No local APIC present\n");
1680 		return -1;
1681 	}
1682 
1683 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1684 	return 0;
1685 }
1686 #else
1687 
/*
 * Confirm via CPUID that the local APIC is enabled, then record its
 * physical base address (the BIOS may have relocated it via the
 * APIC_BASE MSR on family >= 6).  Returns 0 on success, -1 otherwise.
 */
static int __init apic_verify(void)
{
	u32 features, h, l;

	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		pr_warning("Could not enable APIC!\n");
		return -1;
	}
	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/* The BIOS may have set up the APIC at some other address */
	if (boot_cpu_data.x86 >= 6) {
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (l & MSR_IA32_APICBASE_ENABLE)
			mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
	}

	pr_info("Found and enabled local APIC!\n");
	return 0;
}
1714 
/*
 * Force-enable a BIOS-disabled local APIC at @addr via the APIC_BASE
 * MSR, then verify it.  Returns 0 on success, -1 on failure or when
 * the APIC is disabled on the command line.
 */
int __init apic_force_enable(unsigned long addr)
{
	u32 h, l;

	if (disable_apic)
		return -1;

	/*
	 * Some BIOSes disable the local APIC in the APIC_BASE
	 * MSR. This can only be done in software for Intel P6 or later
	 * and AMD K7 (Model > 1) or later.
	 */
	if (boot_cpu_data.x86 >= 6) {
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			pr_info("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | addr;
			wrmsr(MSR_IA32_APICBASE, l, h);
			/* Remember for disable_local_APIC() to undo this. */
			enabled_via_apicbase = 1;
		}
	}
	return apic_verify();
}
1739 
1740 /*
1741  * Detect and initialize APIC
1742  */
1743 static int __init detect_init_APIC(void)
1744 {
1745 	/* Disabled by kernel option? */
1746 	if (disable_apic)
1747 		return -1;
1748 
1749 	switch (boot_cpu_data.x86_vendor) {
1750 	case X86_VENDOR_AMD:
1751 		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1752 		    (boot_cpu_data.x86 >= 15))
1753 			break;
1754 		goto no_apic;
1755 	case X86_VENDOR_INTEL:
1756 		if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1757 		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
1758 			break;
1759 		goto no_apic;
1760 	default:
1761 		goto no_apic;
1762 	}
1763 
1764 	if (!cpu_has_apic) {
1765 		/*
1766 		 * Over-ride BIOS and try to enable the local APIC only if
1767 		 * "lapic" specified.
1768 		 */
1769 		if (!force_enable_local_apic) {
1770 			pr_info("Local APIC disabled by BIOS -- "
1771 				"you can enable it with \"lapic\"\n");
1772 			return -1;
1773 		}
1774 		if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
1775 			return -1;
1776 	} else {
1777 		if (apic_verify())
1778 			return -1;
1779 	}
1780 
1781 	apic_pm_activate();
1782 
1783 	return 0;
1784 
1785 no_apic:
1786 	pr_info("No local APIC present or hardware disabled\n");
1787 	return -1;
1788 }
1789 #endif
1790 
1791 /**
1792  * init_apic_mappings - initialize APIC mappings
1793  */
1794 void __init init_apic_mappings(void)
1795 {
1796 	unsigned int new_apicid;
1797 
1798 	if (x2apic_mode) {
1799 		boot_cpu_physical_apicid = read_apic_id();
1800 		return;
1801 	}
1802 
1803 	/* If no local APIC can be found return early */
1804 	if (!smp_found_config && detect_init_APIC()) {
1805 		/* lets NOP'ify apic operations */
1806 		pr_info("APIC: disable apic facility\n");
1807 		apic_disable();
1808 	} else {
1809 		apic_phys = mp_lapic_addr;
1810 
1811 		/*
1812 		 * acpi lapic path already maps that address in
1813 		 * acpi_register_lapic_address()
1814 		 */
1815 		if (!acpi_lapic && !smp_found_config)
1816 			register_lapic_address(apic_phys);
1817 	}
1818 
1819 	/*
1820 	 * Fetch the APIC ID of the BSP in case we have a
1821 	 * default configuration (or the MP table is broken).
1822 	 */
1823 	new_apicid = read_apic_id();
1824 	if (boot_cpu_physical_apicid != new_apicid) {
1825 		boot_cpu_physical_apicid = new_apicid;
1826 		/*
1827 		 * yeah -- we lie about apic_version
1828 		 * in case if apic was disabled via boot option
1829 		 * but it's not a problem for SMP compiled kernel
1830 		 * since smp_sanity_check is prepared for such a case
1831 		 * and disable smp mode
1832 		 */
1833 		apic_version[new_apicid] =
1834 			 GET_APIC_VERSION(apic_read(APIC_LVR));
1835 	}
1836 }
1837 
/*
 * Record the local APIC physical address and, in xapic mode, map it
 * through the fixmap.  Also captures the boot CPU's APIC ID/version the
 * first time the address becomes accessible.
 */
void __init register_lapic_address(unsigned long address)
{
	mp_lapic_addr = address;

	if (!x2apic_mode) {
		set_fixmap_nocache(FIX_APIC_BASE, address);
		apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
			    APIC_BASE, mp_lapic_addr);
	}
	/* -1U means "not yet known" (see the variable's initializer). */
	if (boot_cpu_physical_apicid == -1U) {
		boot_cpu_physical_apicid  = read_apic_id();
		apic_version[boot_cpu_physical_apicid] =
			 GET_APIC_VERSION(apic_read(APIC_LVR));
	}
}
1853 
1854 /*
1855  * This initializes the IO-APIC and APIC hardware if this is
1856  * a UP kernel.
1857  */
1858 int apic_version[MAX_LOCAL_APIC];
1859 
/*
 * Bring up the local APIC (and IO-APIC, if present) on a uniprocessor
 * kernel.  Returns 0 on success, -1 when the APIC is absent or disabled.
 */
int __init APIC_init_uniprocessor(void)
{
	if (disable_apic) {
		pr_info("Apic disabled\n");
		return -1;
	}
#ifdef CONFIG_X86_64
	if (!cpu_has_apic) {
		disable_apic = 1;
		pr_info("Apic disabled by BIOS\n");
		return -1;
	}
#else
	if (!smp_found_config && !cpu_has_apic)
		return -1;

	/*
	 * Complain if the BIOS pretends there is one.
	 */
	if (!cpu_has_apic &&
	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
			boot_cpu_physical_apicid);
		return -1;
	}
#endif

	default_setup_apic_routing();

	verify_local_APIC();
	connect_bsp_APIC();

#ifdef CONFIG_X86_64
	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
#else
	/*
	 * Hack: In case of kdump, after a crash, kernel might be booting
	 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
	 * might be zero if read from MP tables. Get it from LAPIC.
	 */
# ifdef CONFIG_CRASH_DUMP
	boot_cpu_physical_apicid = read_apic_id();
# endif
#endif
	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	setup_local_APIC();

#ifdef CONFIG_X86_IO_APIC
	/*
	 * Now enable IO-APICs, actually call clear_IO_APIC
	 * We need clear_IO_APIC before enabling error vector
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();
#endif

	bsp_end_local_APIC_setup();

#ifdef CONFIG_X86_IO_APIC
	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else {
		nr_ioapics = 0;
	}
#endif

	x86_init.timers.setup_percpu_clockev();
	return 0;
}
1929 
1930 /*
1931  * Local APIC interrupts
1932  */
1933 
1934 /*
1935  * This interrupt should _never_ happen with our APIC/SMP architecture
1936  */
1937 static inline void __smp_spurious_interrupt(void)
1938 {
1939 	u32 v;
1940 
1941 	/*
1942 	 * Check if this really is a spurious interrupt and ACK it
1943 	 * if it is a vectored one.  Just in case...
1944 	 * Spurious interrupts should not be ACKed.
1945 	 */
1946 	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1947 	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1948 		ack_APIC_irq();
1949 
1950 	inc_irq_stat(irq_spurious_count);
1951 
1952 	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
1953 	pr_info("spurious APIC interrupt on CPU#%d, "
1954 		"should never happen.\n", smp_processor_id());
1955 }
1956 
/* Spurious-interrupt entry point (non-tracing variant). */
__visible void smp_spurious_interrupt(struct pt_regs *regs)
{
	entering_irq();
	__smp_spurious_interrupt();
	exiting_irq();
}
1963 
/* Spurious-interrupt entry point with entry/exit tracepoints. */
__visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
{
	entering_irq();
	trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
	__smp_spurious_interrupt();
	trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR);
	exiting_irq();
}
1972 
1973 /*
1974  * This interrupt should never happen with our APIC/SMP architecture
1975  */
1976 static inline void __smp_error_interrupt(struct pt_regs *regs)
1977 {
1978 	u32 v;
1979 	u32 i = 0;
1980 	static const char * const error_interrupt_reason[] = {
1981 		"Send CS error",		/* APIC Error Bit 0 */
1982 		"Receive CS error",		/* APIC Error Bit 1 */
1983 		"Send accept error",		/* APIC Error Bit 2 */
1984 		"Receive accept error",		/* APIC Error Bit 3 */
1985 		"Redirectable IPI",		/* APIC Error Bit 4 */
1986 		"Send illegal vector",		/* APIC Error Bit 5 */
1987 		"Received illegal vector",	/* APIC Error Bit 6 */
1988 		"Illegal register address",	/* APIC Error Bit 7 */
1989 	};
1990 
1991 	/* First tickle the hardware, only then report what went on. -- REW */
1992 	apic_write(APIC_ESR, 0);
1993 	v = apic_read(APIC_ESR);
1994 	ack_APIC_irq();
1995 	atomic_inc(&irq_err_count);
1996 
1997 	apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
1998 		    smp_processor_id(), v);
1999 
2000 	v &= 0xff;
2001 	while (v) {
2002 		if (v & 0x1)
2003 			apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
2004 		i++;
2005 		v >>= 1;
2006 	}
2007 
2008 	apic_printk(APIC_DEBUG, KERN_CONT "\n");
2009 
2010 }
2011 
/* APIC error-interrupt entry point (non-tracing variant). */
__visible void smp_error_interrupt(struct pt_regs *regs)
{
	entering_irq();
	__smp_error_interrupt(regs);
	exiting_irq();
}
2018 
/* APIC error-interrupt entry point with entry/exit tracepoints. */
__visible void smp_trace_error_interrupt(struct pt_regs *regs)
{
	entering_irq();
	trace_error_apic_entry(ERROR_APIC_VECTOR);
	__smp_error_interrupt(regs);
	trace_error_apic_exit(ERROR_APIC_VECTOR);
	exiting_irq();
}
2027 
2028 /**
2029  * connect_bsp_APIC - attach the APIC to the interrupt system
2030  */
2031 void __init connect_bsp_APIC(void)
2032 {
2033 #ifdef CONFIG_X86_32
2034 	if (pic_mode) {
2035 		/*
2036 		 * Do not trust the local APIC being empty at bootup.
2037 		 */
2038 		clear_local_APIC();
2039 		/*
2040 		 * PIC mode, enable APIC mode in the IMCR, i.e.  connect BSP's
2041 		 * local APIC to INT and NMI lines.
2042 		 */
2043 		apic_printk(APIC_VERBOSE, "leaving PIC mode, "
2044 				"enabling APIC mode.\n");
2045 		imcr_pic_to_apic();
2046 	}
2047 #endif
2048 	if (apic->enable_apic_mode)
2049 		apic->enable_apic_mode();
2050 }
2051 
2052 /**
2053  * disconnect_bsp_APIC - detach the APIC from the interrupt system
2054  * @virt_wire_setup:	indicates, whether virtual wire mode is selected
2055  *
2056  * Virtual wire mode is necessary to deliver legacy interrupts even when the
2057  * APIC is disabled.
2058  */
2059 void disconnect_bsp_APIC(int virt_wire_setup)
2060 {
2061 	unsigned int value;
2062 
2063 #ifdef CONFIG_X86_32
2064 	if (pic_mode) {
2065 		/*
2066 		 * Put the board back into PIC mode (has an effect only on
2067 		 * certain older boards).  Note that APIC interrupts, including
2068 		 * IPIs, won't work beyond this point!  The only exception are
2069 		 * INIT IPIs.
2070 		 */
2071 		apic_printk(APIC_VERBOSE, "disabling APIC mode, "
2072 				"entering PIC mode.\n");
2073 		imcr_apic_to_pic();
2074 		return;
2075 	}
2076 #endif
2077 
2078 	/* Go back to Virtual Wire compatibility mode */
2079 
2080 	/* For the spurious interrupt use vector F, and enable it */
2081 	value = apic_read(APIC_SPIV);
2082 	value &= ~APIC_VECTOR_MASK;
2083 	value |= APIC_SPIV_APIC_ENABLED;
2084 	value |= 0xf;
2085 	apic_write(APIC_SPIV, value);
2086 
2087 	if (!virt_wire_setup) {
2088 		/*
2089 		 * For LVT0 make it edge triggered, active high,
2090 		 * external and enabled
2091 		 */
2092 		value = apic_read(APIC_LVT0);
2093 		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2094 			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2095 			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2096 		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2097 		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
2098 		apic_write(APIC_LVT0, value);
2099 	} else {
2100 		/* Disable LVT0 */
2101 		apic_write(APIC_LVT0, APIC_LVT_MASKED);
2102 	}
2103 
2104 	/*
2105 	 * For LVT1 make it edge triggered, active high,
2106 	 * nmi and enabled
2107 	 */
2108 	value = apic_read(APIC_LVT1);
2109 	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2110 			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2111 			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2112 	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2113 	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
2114 	apic_write(APIC_LVT1, value);
2115 }
2116 
/*
 * Register one enumerated processor (from ACPI/MP tables) with the
 * kernel's CPU bookkeeping.  Returns the assigned logical CPU number,
 * or a negative errno when the CPU is ignored (disabled on the command
 * line, or over the nr_cpu_ids limit).
 */
int generic_processor_info(int apicid, int version)
{
	int cpu, max = nr_cpu_ids;
	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
				phys_cpu_present_map);

	/*
	 * boot_cpu_physical_apicid is designed to have the apicid
	 * returned by read_apic_id(), i.e, the apicid of the
	 * currently booting-up processor. However, on some platforms,
	 * it is temporarily modified by the apicid reported as BSP
	 * through MP table. Concretely:
	 *
	 * - arch/x86/kernel/mpparse.c: MP_processor_info()
	 * - arch/x86/mm/amdtopology.c: amd_numa_init()
	 * - arch/x86/platform/visws/visws_quirks.c: MP_processor_info()
	 *
	 * This function is executed with the modified
	 * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel
	 * parameter doesn't work to disable APs on kdump 2nd kernel.
	 *
	 * Since fixing handling of boot_cpu_physical_apicid requires
	 * another discussion and tests on each platform, we leave it
	 * for now and here we use read_apic_id() directly in this
	 * function, generic_processor_info().
	 */
	if (disabled_cpu_apicid != BAD_APICID &&
	    disabled_cpu_apicid != read_apic_id() &&
	    disabled_cpu_apicid == apicid) {
		int thiscpu = num_processors + disabled_cpus;

		pr_warning("APIC: Disabling requested cpu."
			   " Processor %d/0x%x ignored.\n",
			   thiscpu, apicid);

		disabled_cpus++;
		return -ENODEV;
	}

	/*
	 * If boot cpu has not been detected yet, then only allow upto
	 * nr_cpu_ids - 1 processors and keep one slot free for boot cpu
	 */
	if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
	    apicid != boot_cpu_physical_apicid) {
		int thiscpu = max + disabled_cpus - 1;

		pr_warning(
			"ACPI: NR_CPUS/possible_cpus limit of %i almost"
			" reached. Keeping one slot for boot cpu."
			"  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);

		disabled_cpus++;
		return -ENODEV;
	}

	if (num_processors >= nr_cpu_ids) {
		int thiscpu = max + disabled_cpus;

		pr_warning(
			"ACPI: NR_CPUS/possible_cpus limit of %i reached."
			"  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);

		disabled_cpus++;
		return -EINVAL;
	}

	num_processors++;
	if (apicid == boot_cpu_physical_apicid) {
		/*
		 * x86_bios_cpu_apicid is required to have processors listed
		 * in same order as logical cpu numbers. Hence the first
		 * entry is BSP, and so on.
		 * boot_cpu_init() already hold bit 0 in cpu_present_mask
		 * for BSP.
		 */
		cpu = 0;
	} else
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	/*
	 * Validate version
	 */
	if (version == 0x0) {
		pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
			   cpu, apicid);
		version = 0x10;
	}
	apic_version[apicid] = version;

	if (version != apic_version[boot_cpu_physical_apicid]) {
		pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
			apic_version[boot_cpu_physical_apicid], cpu, version);
	}

	physid_set(apicid, phys_cpu_present_map);
	if (apicid > max_physical_apicid)
		max_physical_apicid = apicid;

#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
		apic->x86_32_early_logical_apicid(cpu);
#endif
	set_cpu_possible(cpu, true);
	set_cpu_present(cpu, true);

	return cpu;
}
2229 
/* Return the physical APIC ID of the executing CPU, read from the hardware. */
int hard_smp_processor_id(void)
{
	return read_apic_id();
}
2234 
/*
 * Program this CPU's logical destination: select the destination format
 * (APIC_DFR_VALUE) and set the bit for this CPU in the LDR.
 */
void default_init_apic_ldr(void)
{
	unsigned long val;

	apic_write(APIC_DFR, APIC_DFR_VALUE);
	/* preserve reserved bits, replace only the logical APIC ID field */
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
	apic_write(APIC_LDR, val);
}
2244 
2245 int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
2246 				   const struct cpumask *andmask,
2247 				   unsigned int *apicid)
2248 {
2249 	unsigned int cpu;
2250 
2251 	for_each_cpu_and(cpu, cpumask, andmask) {
2252 		if (cpumask_test_cpu(cpu, cpu_online_mask))
2253 			break;
2254 	}
2255 
2256 	if (likely(cpu < nr_cpu_ids)) {
2257 		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
2258 		return 0;
2259 	}
2260 
2261 	return -EINVAL;
2262 }
2263 
2264 /*
2265  * Override the generic EOI implementation with an optimized version.
2266  * Only called during early boot when only one CPU is active and with
2267  * interrupts disabled, so we know this does not race with actual APIC driver
2268  * use.
2269  */
2270 void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
2271 {
2272 	struct apic **drv;
2273 
2274 	for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
2275 		/* Should happen once for each apic */
2276 		WARN_ON((*drv)->eoi_write == eoi_write);
2277 		(*drv)->eoi_write = eoi_write;
2278 	}
2279 }
2280 
2281 /*
2282  * Power management
2283  */
2284 #ifdef CONFIG_PM
2285 
/*
 * Local APIC register state saved across a suspend/resume cycle;
 * filled in by lapic_suspend() and written back by lapic_resume().
 */
static struct {
	/*
	 * 'active' is true if the local APIC was enabled by us and
	 * not the BIOS; this signifies that we are also responsible
	 * for disabling it before entering apm/acpi suspend
	 */
	int active;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	/* only saved/restored when the APIC has a thermal LVT (maxlvt >= 5) */
	unsigned int apic_thmr;
} apic_pm_state;
2308 
/*
 * Syscore suspend handler: save the local APIC register state into
 * apic_pm_state and disable the APIC.  No-op (returns 0) unless we
 * enabled the APIC ourselves (apic_pm_state.active).
 */
static int lapic_suspend(void)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	/* the performance counter LVT entry exists only when maxlvt >= 4 */
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_THERMAL_VECTOR
	/* the thermal LVT entry exists only when maxlvt >= 5 */
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif

	/* quiesce with interrupts off: APIC first, then interrupt remapping */
	local_irq_save(flags);
	disable_local_APIC();

	irq_remapping_disable();

	local_irq_restore(flags);
	return 0;
}
2345 
/*
 * Syscore resume handler: re-enable the local APIC (x2apic or MMIO mode)
 * and write back the register state saved by lapic_suspend().  No-op
 * unless apic_pm_state.active is set.
 */
static void lapic_resume(void)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return;

	local_irq_save(flags);

	/*
	 * IO-APIC and PIC have their own resume routines.
	 * We just mask them here to make sure the interrupt
	 * subsystem is completely quiet while we enable x2apic
	 * and interrupt-remapping.
	 */
	mask_ioapic_entries();
	legacy_pic->mask_all();

	if (x2apic_mode)
		enable_x2apic();
	else {
		/*
		 * Make sure the APICBASE points to the right address
		 *
		 * FIXME! This will be wrong if we ever support suspend on
		 * SMP! We'll need to do this as part of the CPU restore!
		 */
		if (boot_cpu_data.x86 >= 6) {
			rdmsr(MSR_IA32_APICBASE, l, h);
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
			wrmsr(MSR_IA32_APICBASE, l, h);
		}
	}

	maxlvt = lapic_get_maxlvt();
	/* mask the error LVT while the other registers are being restored */
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#if defined(CONFIG_X86_MCE_INTEL)
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	/* clear any pending error status before unmasking the error LVT */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	irq_remapping_reenable(x2apic_mode);

	local_irq_restore(flags);
}
2411 
2412 /*
2413  * This device has no shutdown method - fully functioning local APICs
2414  * are needed on every CPU up until machine_halt/restart/poweroff.
2415  */
2416 
2417 static struct syscore_ops lapic_syscore_ops = {
2418 	.resume		= lapic_resume,
2419 	.suspend	= lapic_suspend,
2420 };
2421 
/* Mark that we enabled the APIC, so lapic_suspend()/lapic_resume() act. */
static void apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
2426 
2427 static int __init init_lapic_sysfs(void)
2428 {
2429 	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
2430 	if (cpu_has_apic)
2431 		register_syscore_ops(&lapic_syscore_ops);
2432 
2433 	return 0;
2434 }
2435 
2436 /* local apic needs to resume before other devices access its registers. */
2437 core_initcall(init_lapic_sysfs);
2438 
2439 #else	/* CONFIG_PM */
2440 
/* !CONFIG_PM stub: no APIC state is saved or restored. */
static void apic_pm_activate(void) { }
2442 
2443 #endif	/* CONFIG_PM */
2444 
2445 #ifdef CONFIG_X86_64
2446 
2447 static int apic_cluster_num(void)
2448 {
2449 	int i, clusters, zeros;
2450 	unsigned id;
2451 	u16 *bios_cpu_apicid;
2452 	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
2453 
2454 	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
2455 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
2456 
2457 	for (i = 0; i < nr_cpu_ids; i++) {
2458 		/* are we being called early in kernel startup? */
2459 		if (bios_cpu_apicid) {
2460 			id = bios_cpu_apicid[i];
2461 		} else if (i < nr_cpu_ids) {
2462 			if (cpu_present(i))
2463 				id = per_cpu(x86_bios_cpu_apicid, i);
2464 			else
2465 				continue;
2466 		} else
2467 			break;
2468 
2469 		if (id != BAD_APICID)
2470 			__set_bit(APIC_CLUSTERID(id), clustermap);
2471 	}
2472 
2473 	/* Problem:  Partially populated chassis may not have CPUs in some of
2474 	 * the APIC clusters they have been allocated.  Only present CPUs have
2475 	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
2476 	 * Since clusters are allocated sequentially, count zeros only if
2477 	 * they are bounded by ones.
2478 	 */
2479 	clusters = 0;
2480 	zeros = 0;
2481 	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
2482 		if (test_bit(i, clustermap)) {
2483 			clusters += 1 + zeros;
2484 			zeros = 0;
2485 		} else
2486 			++zeros;
2487 	}
2488 
2489 	return clusters;
2490 }
2491 
/* set once dmi_check_multi() has consulted the DMI table */
static int multi_checked;
/* set by set_multi() when DMI identifies a multi-chassis system */
static int multi;
2494 
2495 static int set_multi(const struct dmi_system_id *d)
2496 {
2497 	if (multi)
2498 		return 0;
2499 	pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
2500 	multi = 1;
2501 	return 0;
2502 }
2503 
/* Systems known to be multi-chassis; a DMI match invokes set_multi(). */
static const struct dmi_system_id multi_dmi_table[] = {
	{
		.callback = set_multi,
		.ident = "IBM System Summit2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
		},
	},
	{}	/* terminator */
};
2515 
2516 static void dmi_check_multi(void)
2517 {
2518 	if (multi_checked)
2519 		return;
2520 
2521 	dmi_check_system(multi_dmi_table);
2522 	multi_checked = 1;
2523 }
2524 
2525 /*
2526  * apic_is_clustered_box() -- Check if we can expect good TSC
2527  *
2528  * Thus far, the major user of this is IBM's Summit2 series:
2529  * Clustered boxes may have unsynced TSC problems if they are
2530  * multi-chassis.
2531  * Use DMI to check them
2532  */
2533 int apic_is_clustered_box(void)
2534 {
2535 	dmi_check_multi();
2536 	if (multi)
2537 		return 1;
2538 
2539 	if (!is_vsmp_box())
2540 		return 0;
2541 
2542 	/*
2543 	 * ScaleMP vSMPowered boxes have one cluster per board and TSCs are
2544 	 * not guaranteed to be synced between boards
2545 	 */
2546 	if (apic_cluster_num() > 1)
2547 		return 1;
2548 
2549 	return 0;
2550 }
2551 #endif
2552 
2553 /*
2554  * APIC command line parameters
2555  */
/* "disableapic": run without the local APIC. */
static int __init setup_disableapic(char *arg)
{
	disable_apic = 1;
	/* also clear the APIC CPU feature bit */
	setup_clear_cpu_cap(X86_FEATURE_APIC);
	return 0;
}
early_param("disableapic", setup_disableapic);
2563 
/* "nolapic": same as disableapic, kept for compatibility */
static int __init setup_nolapic(char *arg)
{
	return setup_disableapic(arg);
}
early_param("nolapic", setup_nolapic);
2570 
/* "lapic_timer_c2_ok": trust the local APIC timer to keep ticking in C2. */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
2577 
/* "noapictimer": do not use the local APIC timer. */
static int __init parse_disable_apic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("noapictimer", parse_disable_apic_timer);
2584 
2585 static int __init parse_nolapic_timer(char *arg)
2586 {
2587 	disable_apic_timer = 1;
2588 	return 0;
2589 }
2590 early_param("nolapic_timer", parse_nolapic_timer);
2591 
2592 static int __init apic_set_verbosity(char *arg)
2593 {
2594 	if (!arg)  {
2595 #ifdef CONFIG_X86_64
2596 		skip_ioapic_setup = 0;
2597 		return 0;
2598 #endif
2599 		return -EINVAL;
2600 	}
2601 
2602 	if (strcmp("debug", arg) == 0)
2603 		apic_verbosity = APIC_DEBUG;
2604 	else if (strcmp("verbose", arg) == 0)
2605 		apic_verbosity = APIC_VERBOSE;
2606 	else {
2607 		pr_warning("APIC Verbosity level %s not recognised"
2608 			" use apic=verbose or apic=debug\n", arg);
2609 		return -EINVAL;
2610 	}
2611 
2612 	return 0;
2613 }
2614 early_param("apic", apic_set_verbosity);
2615 
2616 static int __init lapic_insert_resource(void)
2617 {
2618 	if (!apic_phys)
2619 		return -1;
2620 
2621 	/* Put local APIC into the resource map. */
2622 	lapic_resource.start = apic_phys;
2623 	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
2624 	insert_resource(&iomem_resource, &lapic_resource);
2625 
2626 	return 0;
2627 }
2628 
2629 /*
2630  * need call insert after e820_reserve_resources()
2631  * that is using request_resource
2632  */
2633 late_initcall(lapic_insert_resource);
2634 
2635 static int __init apic_set_disabled_cpu_apicid(char *arg)
2636 {
2637 	if (!arg || !get_option(&arg, &disabled_cpu_apicid))
2638 		return -EINVAL;
2639 
2640 	return 0;
2641 }
2642 early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
2643