/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init

#define	for_each_ioapic(idx)		\
	for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
#define	for_each_ioapic_reverse(idx)	\
	for ((idx) = nr_ioapics - 1; (idx) >= 0; (idx)--)
#define	for_each_pin(idx, pin)		\
	for ((pin) = 0; (pin) < ioapics[(idx)].nr_registers; (pin)++)
#define	for_each_ioapic_pin(idx, pin)	\
	for_each_ioapic((idx))		\
		for_each_pin((idx), (pin))

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

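/*
 * Illustrative use of the iterators above (a sketch, not part of the
 * driver logic): walking every redirection entry in the system looks
 * like
 *
 *	int apic, pin;
 *
 *	for_each_ioapic_pin(apic, pin)
 *		clear_IO_APIC_pin(apic, pin);
 *
 * which is exactly how clear_IO_APIC() below is written.
 */
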
/*
 *      Is the SiS APIC rmw bug present?
 *      -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi  gsi_config;
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(ioapic_idx)	ioapics[ioapic_idx].mp_config.apicver

int mpc_ioapic_id(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicid;
}

unsigned int mpc_ioapic_addr(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicaddr;
}

struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
{
	return &ioapics[ioapic_idx].gsi_config;
}

int nr_ioapics;

/* One past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#ifdef CONFIG_EISA
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin(unsigned int irq, int node,
				 struct io_apic_irq_attr *attr);

/* Called from the mpparse/acpi/sfi code to save IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}


/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs)
		io_apic_irqs = ~0UL;

	for_each_ioapic(i) {
		ioapics[i].saved_registers =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				ioapics[i].nr_registers, GFP_KERNEL);
		if (!ioapics[i].saved_registers)
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQs, start by assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on all cpus.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_setall(cfg[i].domain);
		}
	}

	return 0;
}

static inline struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_cfg(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

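/*
 * The struct above mirrors the IO-APIC's memory-mapped register
 * window: the index (IOREGSEL) register sits at offset 0x00, the data
 * (IOWIN) register at offset 0x10 and the EOI register at offset
 * 0x40 -- hence the pads of 3 and 11 unused dwords.
 */
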
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}

void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

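/*
 * A sketch (illustration only) of how the index/data window pair is
 * used for a read-modify-write of the low dword of a redirection
 * entry, assuming the caller holds ioapic_lock:
 *
 *	reg = io_apic_read(apic, 0x10 + pin * 2);
 *	reg |= IO_APIC_REDIR_MASKED;
 *	io_apic_modify(apic, 0x10 + pin * 2, reg);
 *
 * __io_apic_modify_irq() below is the generalized version of this.
 */
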
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APICs require us to rewrite the index register.
 */
void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

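/*
 * A redirection table entry is 64 bits wide: w1 aliases the low dword
 * (vector, delivery/dest mode, polarity, remote-IRR, trigger and mask
 * bits) and w2 the high dword (destination field), so the union lets
 * whole entries move through the 32-bit register window.
 */
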
static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;

	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);

	return eu.entry;
}

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.entry = __ioapic_read_entry(apic, pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}

/*
 * IO-APIC versions below 0x20 don't support the EOI register.
 * For the record, here is the information about the various versions:
 *     0Xh     82489DX
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *     30h-FFh Reserved
 *
 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
 * version as 0x2. This is a documentation error; these ICH chips
 * use io-apics of version 0x20.
 *
 * For IO-APICs with an EOI register, we use it to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with the RTE masked during the
 * transition.
 */
void native_eoi_ioapic_pin(int apic, int pin, int vector)
{
	if (mpc_ioapic_ver(apic) >= 0x20) {
		io_apic_eoi(apic, vector);
	} else {
		struct IO_APIC_route_entry entry, entry1;

		entry = entry1 = __ioapic_read_entry(apic, pin);

		/*
		 * Mask the entry and change the trigger mode to edge.
		 */
		entry1.mask = 1;
		entry1.trigger = IOAPIC_EDGE;

		__ioapic_write_entry(apic, pin, entry1);

		/*
		 * Restore the previous level triggered entry.
		 */
		__ioapic_write_entry(apic, pin, entry);
	}
}

void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin)
		x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin,
					       cfg->vector);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Make sure the entry is masked and re-read the contents to check
	 * if it is a level triggered pin and if the remote-IRR is set.
	 */
	if (!entry.mask) {
		entry.mask = 1;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		unsigned long flags;

		/*
		 * Make sure the trigger mode is set to level. Explicit EOI
		 * doesn't clear the remote-IRR if the trigger mode is not
		 * set to level.
		 */
		if (!entry.trigger) {
			entry.trigger = IOAPIC_LEVEL;
			ioapic_write_entry(apic, pin, entry);
		}

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	/*
	 * Clear the rest of the bits in the IO-APIC RTE except for the mask
	 * bit.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		pr_err("Unable to reset IRR for apic: %d, pin: %d\n",
		       mpc_ioapic_id(apic), pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for_each_ioapic_pin(apic, pin)
		clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

/*
 * Save all the IO-APIC RTEs.
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for_each_ioapic(apic) {
		if (!ioapics[apic].saved_registers) {
			err = -ENOMEM;
			continue;
		}

		for_each_pin(apic, pin)
			ioapics[apic].saved_registers[pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}

/*
 * Mask all IO APIC entries.
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for_each_ioapic(apic) {
		if (!ioapics[apic].saved_registers)
			continue;

		for_each_pin(apic, pin) {
			struct IO_APIC_route_entry entry;

			entry = ioapics[apic].saved_registers[pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore the IO APIC entries which were saved in the ioapic structure.
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for_each_ioapic(apic) {
		if (!ioapics[apic].saved_registers)
			continue;

		for_each_pin(apic, pin)
			ioapic_write_entry(apic, pin,
					   ioapics[apic].saved_registers[pin]);
	}
	return 0;
}

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int ioapic_idx, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	if (i < mp_irq_entries) {
		int ioapic_idx;

		for_each_ioapic(ioapic_idx)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
				return ioapic_idx;
	}

	return -1;
}

#ifdef CONFIG_EISA
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR. */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

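/*
 * For reference: the MP table encodes these choices in mpc_intsrc
 * irqflag -- bits 0-1 are the polarity (0 = conforms to the bus,
 * 1 = active high, 2 = reserved, 3 = active low) and bits 2-3 are the
 * trigger mode (0 = conforms, 1 = edge, 2 = reserved, 3 = level),
 * which is exactly what irq_polarity() and irq_trigger() below decode.
 */
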
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			pr_warn("broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			pr_warn("broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag >> 2) & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#ifdef CONFIG_EISA
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				default:
				{
					pr_warn("broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			pr_warn("broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			pr_warn("broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = gsi_cfg->gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}

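/*
 * Worked example (a sketch): with NR_IRQS_LEGACY == 16, a PCI device
 * on pin 3 of an IO-APIC whose gsi_base is 24 yields gsi = 27 and so
 * irq = 27, while a legacy gsi below 16 is remapped to gsi_top + gsi
 * to keep it clear of the ISA irq numbers.
 */
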
/*
 * Find a specific PCI IRQ entry.
 * Not an __init; possibly needed by modules.
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int ioapic_idx, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for_each_ioapic(ioapic_idx)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    mp_irqs[i].irqtype == mp_INT &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq);

			if (!(ioapic_idx || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
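	/*
	 * Illustration: successive allocations walk the vector space in
	 * steps of 16 (e.g. 0x31, 0x41, 0x51, ... when VECTOR_OFFSET_START
	 * is 1); once first_system_vector is reached, `offset' is bumped
	 * by one and the scan restarts at FIRST_EXTERNAL_VECTOR + offset,
	 * so consecutive irqs land on different interrupt levels.
	 */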
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try to allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * The new cpumask using the vector is a proper subset
			 * of the current in-use mask, so clean up the vector
			 * allocation for the members that are not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cpumask_copy(cfg->old_domain, cfg->domain);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
			break;
		}
	}
	cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we set up our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the in-use vectors */
	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq <= VECTOR_UNDEFINED)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
	}
	raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for_each_ioapic_pin(apic, pin) {
		idx = find_irq_entry(apic, pin, mp_INT);
		if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
			return irq_trigger(idx);
	}
	/*
	 * Nonexistent IRQs default to edge.
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif

static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	if (setup_remapped_irq(irq, cfg, chip))
		fasteoi = trigger != 0;

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}

int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	memset(entry, 0, sizeof(*entry));

	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode     = apic->irq_dest_mode;
	entry->dest	     = destination;
	entry->vector	     = vector;
	entry->mask	     = 0;			/* enable IRQ */
	entry->trigger	     = attr->trigger;
	entry->polarity	     = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
				struct io_apic_irq_attr *attr)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
					 &dest)) {
		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i Dest:%d)\n",
		    attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
		    cfg->vector, irq, attr->trigger, attr->polarity, dest);

	if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
		pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	ioapic_register_intr(irq, cfg, attr->trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
}

static bool __init io_apic_pin_not_connected(int idx, int ioapic_idx, int pin)
{
	if (idx != -1)
		return false;

	apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
		    mpc_ioapic_id(ioapic_idx), pin);
	return true;
}

static void __init __io_apic_setup_irqs(unsigned int ioapic_idx)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for_each_pin(ioapic_idx, pin) {
		idx = find_irq_entry(ioapic_idx, pin, mp_INT);
		if (io_apic_pin_not_connected(idx, ioapic_idx, pin))
			continue;

		irq = pin_2_irq(idx, ioapic_idx, pin);

		if ((ioapic_idx > 0) && (irq > NR_IRQS_LEGACY))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(ioapic_idx, irq))
			continue;

		set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
				     irq_polarity(idx));

		io_apic_setup_irq_pin(irq, node, &attr);
	}
}

static void __init setup_IO_APIC_irqs(void)
{
	unsigned int ioapic_idx;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for_each_ioapic(ioapic_idx)
		__io_apic_setup_irqs(ioapic_idx);
}

/*
 * For a GSI that is not on the first ioapic but cannot use
 * acpi_register_gsi(), like some special SCIs on the IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int ioapic_idx = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic_idx = mp_find_ioapic(gsi);
	if (ioapic_idx < 0)
		return;

	pin = mp_find_ioapic_pin(ioapic_idx, gsi);
	idx = find_irq_entry(ioapic_idx, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, ioapic_idx, pin);

	/* Only handle the non-legacy irqs on secondary ioapics */
	if (ioapic_idx == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
					unsigned int pin, int vector)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
						  apic->target_cpus(), &dest)))
		dest = BAD_APICID;

	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = dest;
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scenes we may have an 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(ioapic_idx, pin, entry);
}

void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
{
	int i;

	pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n");

	for (i = 0; i <= nr_entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		pr_debug(" %02x %02X  ", i, entry.dest);
		pr_cont("%1d    %1d    %1d   %1d   %1d    "
			"%1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector);
	}
}

void intel_ir_io_apic_print_entries(unsigned int apic,
				    unsigned int nr_entries)
{
	int i;

	pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n");

	for (i = 0; i <= nr_entries; i++) {
		struct IR_IO_APIC_route_entry *ir_entry;
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		ir_entry = (struct IR_IO_APIC_route_entry *)&entry;

		pr_debug(" %02x %04X ", i, ir_entry->index);
		pr_cont("%1d   %1d    %1d    %1d   %1d   "
			"%1d    %1d     %X    %02X\n",
			ir_entry->format,
			ir_entry->mask,
			ir_entry->trigger,
			ir_entry->irr,
			ir_entry->polarity,
			ir_entry->delivery_status,
			ir_entry->index2,
			ir_entry->zero,
			ir_entry->vector);
	}
}

void ioapic_zap_locks(void)
{
	raw_spin_lock_init(&ioapic_lock);
}

__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
{
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic_idx, 0);
	reg_01.raw = io_apic_read(ioapic_idx, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(ioapic_idx, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(ioapic_idx, 3);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG ".......     : max redirection entries: %02X\n",
		reg_01.bits.entries);

	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %02X\n",
		reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
}

__apicdebuginit(void) print_IO_APICs(void)
{
	int ioapic_idx;
	struct irq_cfg *cfg;
	unsigned int irq;
	struct irq_chip *chip;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for_each_ioapic(ioapic_idx)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(ioapic_idx),
		       ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for_each_ioapic(ioapic_idx)
		print_IO_APIC(ioapic_idx);

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		chip = irq_get_chip(irq);
		if (chip != &ioapic_chip)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			pr_cont("-> %d:%d", entry->apic, entry->pin);
		pr_cont("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
}

__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {             /* !82489DX */
		if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {                       /* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
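
/*
 * Usage (illustration): booting with "show_lapic=2" limits the dump
 * below to the first two CPUs, while "show_lapic=all" prints every
 * online CPU.
 */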

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);


/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic, pin;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for_each_ioapic_pin(apic, pin) {
		/* See if any of the pins is in ExtINT mode */
		struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);

		/* If the interrupt line is enabled and in ExtInt mode,
		 * we have found the pin where the i8259 is connected.
		 */
		if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
			ioapic_i8259.apic = apic;
			ioapic_i8259.pin  = pin;
			goto found_i8259;
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT. */
	/* If we could not find the appropriate pin by looking at the ioapic,
	 * the i8259 is probably not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is set up in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

void native_disable_io_apic(void)
{
	/*
	 * If the i8259 is routed through an IOAPIC, put that IOAPIC in
	 * virtual wire mode so that legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	x86_io_apic_ops.disable();
}

#ifdef CONFIG_X86_32
/*
 * Function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int ioapic_idx;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for_each_ioapic(ioapic_idx) {
		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(ioapic_idx);

		if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(ioapic_idx))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[ioapic_idx].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(ioapic_idx));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(ioapic_idx))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(ioapic_idx);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(ioapic_idx));

		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
			pr_cont("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}

void __init setup_ioapic_ids_from_mpc(void)
{
	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
#endif

2052 int no_timer_check __initdata;
2053 
2054 static int __init notimercheck(char *s)
2055 {
2056 	no_timer_check = 1;
2057 	return 1;
2058 }
2059 __setup("no_timer_check", notimercheck);
2060 
2061 /*
2062  * There is a nasty bug in some older SMP boards: their mptable lies
2063  * about the timer IRQ. We do the following to work around the situation:
2064  *
2065  *	- timer IRQ defaults to IO-APIC IRQ
2066  *	- if this function detects that timer IRQs are defunct, then we fall
2067  *	  back to ISA timer IRQs
2068  */
2069 static int __init timer_irq_works(void)
2070 {
2071 	unsigned long t1 = jiffies;
2072 	unsigned long flags;
2073 
2074 	if (no_timer_check)
2075 		return 1;
2076 
2077 	local_save_flags(flags);
2078 	local_irq_enable();
2079 	/* Let ten ticks pass... */
2080 	mdelay((10 * 1000) / HZ);
2081 	local_irq_restore(flags);
2082 
2083 	/*
2084 	 * Expect a few ticks at least, to be sure some possible
2085 	 * glue logic does not lock up after the first one or
2086 	 * two ticks in a non-ExtINT mode.  Also the local APIC
2087 	 * might have cached one ExtINT interrupt.  Finally, at
2088 	 * least one tick may be lost due to delays.
2089 	 */
2090 
2091 	/* jiffies wrap? */
2092 	if (time_after(jiffies, t1 + 4))
2093 		return 1;
2094 	return 0;
2095 }
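
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * arithmetic behind the check above.  mdelay((10 * 1000) / HZ) busy-waits
 * for roughly ten ticks worth of milliseconds, e.g. 40ms at HZ=250 or
 * 10ms at HZ=1000, so seeing jiffies advance by more than four is taken
 * as evidence that timer interrupts are actually being delivered.
 */
static inline int __maybe_unused timer_ticks_elapsed(unsigned long t1)
{
	/* time_after() copes with jiffies wraparound */
	return time_after(jiffies, t1 + 4);
}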
2096 
2097 /*
2098  * In the SMP+IOAPIC case it might happen that there are an unspecified
2099  * number of pending IRQ events left unhandled. These cases are very rare,
2100  * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2101  * better to do it this way, as we then do not have to be aware of
2102  * 'pending' interrupts in the IRQ path, except at this point.
2103  */
2104 /*
2105  * Edge-triggered interrupt handling needs to resend any interrupt
2106  * that was delayed, but this is now handled in the
2107  * device-independent code.
2108  */
2109 
2110 /*
2111  * Starting up an edge-triggered IO-APIC interrupt is
2112  * nasty - we need to make sure that we get the edge.
2113  * If it is already asserted for some reason, we need to
2114  * return 1 to indicate that it was pending.
2115  *
2116  * This is not complete - we should be able to fake
2117  * an edge even if it isn't on the 8259A...
2118  */
2119 
2120 static unsigned int startup_ioapic_irq(struct irq_data *data)
2121 {
2122 	int was_pending = 0, irq = data->irq;
2123 	unsigned long flags;
2124 
2125 	raw_spin_lock_irqsave(&ioapic_lock, flags);
2126 	if (irq < legacy_pic->nr_legacy_irqs) {
2127 		legacy_pic->mask(irq);
2128 		if (legacy_pic->irq_pending(irq))
2129 			was_pending = 1;
2130 	}
2131 	__unmask_ioapic(data->chip_data);
2132 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2133 
2134 	return was_pending;
2135 }
2136 
2137 static int ioapic_retrigger_irq(struct irq_data *data)
2138 {
2139 	struct irq_cfg *cfg = data->chip_data;
2140 	unsigned long flags;
2141 	int cpu;
2142 
2143 	raw_spin_lock_irqsave(&vector_lock, flags);
2144 	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
2145 	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
2146 	raw_spin_unlock_irqrestore(&vector_lock, flags);
2147 
2148 	return 1;
2149 }
2150 
2151 /*
2152  * Level- and edge-triggered IO-APIC interrupts need different handling,
2153  * so we use two separate IRQ descriptors. Edge-triggered IRQs can be
2154  * handled with the level-triggered descriptor, but that one has slightly
2155  * more overhead. Level-triggered interrupts cannot be handled with the
2156  * edge-triggered handler without risking IRQ storms and other ugly
2157  * races.
2158  */
2159 
2160 #ifdef CONFIG_SMP
2161 void send_cleanup_vector(struct irq_cfg *cfg)
2162 {
2163 	cpumask_var_t cleanup_mask;
2164 
2165 	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
2166 		unsigned int i;
2167 		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
2168 			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
2169 	} else {
2170 		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
2171 		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2172 		free_cpumask_var(cleanup_mask);
2173 	}
2174 	cfg->move_in_progress = 0;
2175 }
2176 
2177 asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
2178 {
2179 	unsigned vector, me;
2180 
2181 	ack_APIC_irq();
2182 	irq_enter();
2183 	exit_idle();
2184 
2185 	me = smp_processor_id();
2186 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2187 		int irq;
2188 		unsigned int irr;
2189 		struct irq_desc *desc;
2190 		struct irq_cfg *cfg;
2191 		irq = __this_cpu_read(vector_irq[vector]);
2192 
2193 		if (irq <= VECTOR_UNDEFINED)
2194 			continue;
2195 
2196 		desc = irq_to_desc(irq);
2197 		if (!desc)
2198 			continue;
2199 
2200 		cfg = irq_cfg(irq);
2201 		if (!cfg)
2202 			continue;
2203 
2204 		raw_spin_lock(&desc->lock);
2205 
2206 		/*
2207 		 * Check if the irq migration is in progress. If so, we
2208 		 * haven't received the cleanup request yet for this irq.
2209 		 */
2210 		if (cfg->move_in_progress)
2211 			goto unlock;
2212 
2213 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2214 			goto unlock;
2215 
2216 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
2217 		/*
2218 		 * Check if the vector that needs to be cleaned up is
2219 		 * registered in the cpu's IRR. If so, then this is not
2220 		 * the best time to clean it up. Let's clean it up in the
2221 		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
2222 		 * to myself.
2223 		 */
2224 		if (irr  & (1 << (vector % 32))) {
2225 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
2226 			goto unlock;
2227 		}
2228 		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
2229 unlock:
2230 		raw_spin_unlock(&desc->lock);
2231 	}
2232 
2233 	irq_exit();
2234 }
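
/*
 * Illustrative sketch (hypothetical helper, not used here): how the IRR
 * lookup above indexes the register array.  The 256 vectors map onto
 * eight 32-bit APIC registers spaced 0x10 apart, so vector / 32 selects
 * the register and vector % 32 selects the bit within it.
 */
static inline unsigned int __maybe_unused apic_irr_pending(unsigned int vector)
{
	unsigned int irr = apic_read(APIC_IRR + (vector / 32 * 0x10));

	return irr & (1U << (vector % 32));
}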
2235 
2236 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
2237 {
2238 	unsigned me;
2239 
2240 	if (likely(!cfg->move_in_progress))
2241 		return;
2242 
2243 	me = smp_processor_id();
2244 
2245 	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2246 		send_cleanup_vector(cfg);
2247 }
2248 
2249 static void irq_complete_move(struct irq_cfg *cfg)
2250 {
2251 	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
2252 }
2253 
2254 void irq_force_complete_move(int irq)
2255 {
2256 	struct irq_cfg *cfg = irq_cfg(irq);
2257 
2258 	if (!cfg)
2259 		return;
2260 
2261 	__irq_complete_move(cfg, cfg->vector);
2262 }
2263 #else
2264 static inline void irq_complete_move(struct irq_cfg *cfg) { }
2265 #endif
2266 
2267 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
2268 {
2269 	int apic, pin;
2270 	struct irq_pin_list *entry;
2271 	u8 vector = cfg->vector;
2272 
2273 	for_each_irq_pin(entry, cfg->irq_2_pin) {
2274 		unsigned int reg;
2275 
2276 		apic = entry->apic;
2277 		pin = entry->pin;
2278 
2279 		io_apic_write(apic, 0x11 + pin*2, dest);
2280 		reg = io_apic_read(apic, 0x10 + pin*2);
2281 		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2282 		reg |= vector;
2283 		io_apic_modify(apic, 0x10 + pin*2, reg);
2284 	}
2285 }
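
/*
 * Layout note (illustrative): each IOAPIC pin owns one 64-bit redirection
 * entry, accessed as two 32-bit registers at index 0x10 + pin * 2 (low
 * word: vector, delivery mode, mask) and 0x11 + pin * 2 (high word:
 * destination in bits 24-31).  That is why the loop above writes the new
 * destination to the high word first and then patches the vector into
 * the low word.
 */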
2286 
2287 /*
2288  * Either sets data->affinity to a valid value and returns the
2289  * ->cpu_mask_to_apicid of it in *dest_id, or returns a negative
2290  * errno and leaves data->affinity untouched.
2291  */
2292 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2293 			  unsigned int *dest_id)
2294 {
2295 	struct irq_cfg *cfg = data->chip_data;
2296 	unsigned int irq = data->irq;
2297 	int err;
2298 
2299 	if (!config_enabled(CONFIG_SMP))
2300 		return -EPERM;
2301 
2302 	if (!cpumask_intersects(mask, cpu_online_mask))
2303 		return -EINVAL;
2304 
2305 	err = assign_irq_vector(irq, cfg, mask);
2306 	if (err)
2307 		return err;
2308 
2309 	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
2310 	if (err) {
2311 		if (assign_irq_vector(irq, cfg, data->affinity))
2312 			pr_err("Failed to recover vector for irq %d\n", irq);
2313 		return err;
2314 	}
2315 
2316 	cpumask_copy(data->affinity, mask);
2317 
2318 	return 0;
2319 }
2320 
2321 
2322 int native_ioapic_set_affinity(struct irq_data *data,
2323 			       const struct cpumask *mask,
2324 			       bool force)
2325 {
2326 	unsigned int dest, irq = data->irq;
2327 	unsigned long flags;
2328 	int ret;
2329 
2330 	if (!config_enabled(CONFIG_SMP))
2331 		return -EPERM;
2332 
2333 	raw_spin_lock_irqsave(&ioapic_lock, flags);
2334 	ret = __ioapic_set_affinity(data, mask, &dest);
2335 	if (!ret) {
2336 		/* Only the high 8 bits are valid. */
2337 		dest = SET_APIC_LOGICAL_ID(dest);
2338 		__target_IO_APIC_irq(irq, dest, data->chip_data);
2339 		ret = IRQ_SET_MASK_OK_NOCOPY;
2340 	}
2341 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2342 	return ret;
2343 }
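
/*
 * Note (illustrative, based on the definitions in apicdef.h):
 * SET_APIC_LOGICAL_ID() shifts the 8-bit destination into bits 24-31,
 * which is the destination field of the redirection entry's high word
 * written by __target_IO_APIC_irq() above -- hence the "only the high
 * 8 bits are valid" remark.
 */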
2344 
2345 static void ack_apic_edge(struct irq_data *data)
2346 {
2347 	irq_complete_move(data->chip_data);
2348 	irq_move_irq(data);
2349 	ack_APIC_irq();
2350 }
2351 
2352 atomic_t irq_mis_count;
2353 
2354 #ifdef CONFIG_GENERIC_PENDING_IRQ
2355 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
2356 {
2357 	struct irq_pin_list *entry;
2358 	unsigned long flags;
2359 
2360 	raw_spin_lock_irqsave(&ioapic_lock, flags);
2361 	for_each_irq_pin(entry, cfg->irq_2_pin) {
2362 		unsigned int reg;
2363 		int pin;
2364 
2365 		pin = entry->pin;
2366 		reg = io_apic_read(entry->apic, 0x10 + pin*2);
2367 		/* Is the remote IRR bit set? */
2368 		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
2369 			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2370 			return true;
2371 		}
2372 	}
2373 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2374 
2375 	return false;
2376 }
2377 
2378 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
2379 {
2380 	/* If we are moving the irq we need to mask it */
2381 	if (unlikely(irqd_is_setaffinity_pending(data))) {
2382 		mask_ioapic(cfg);
2383 		return true;
2384 	}
2385 	return false;
2386 }
2387 
2388 static inline void ioapic_irqd_unmask(struct irq_data *data,
2389 				      struct irq_cfg *cfg, bool masked)
2390 {
2391 	if (unlikely(masked)) {
2392 		/* Only migrate the irq if the ack has been received.
2393 		 *
2394 		 * On rare occasions the broadcast level triggered ack gets
2395 		 * delayed going to ioapics, and if we reprogram the
2396 		 * vector while Remote IRR is still set the irq will never
2397 		 * fire again.
2398 		 *
2399 		 * To prevent this scenario we read the Remote IRR bit
2400 		 * of the ioapic.  This has two effects.
2401 		 * - On any sane system the read of the ioapic will
2402 		 *   flush writes (and acks) going to the ioapic from
2403 		 *   this cpu.
2404 		 * - We get to see if the ACK has actually been delivered.
2405 		 *
2406 		 * Based on failed experiments of reprogramming the
2407 		 * ioapic entry from outside of irq context starting
2408 		 * with masking the ioapic entry and then polling until
2409 		 * Remote IRR was clear before reprogramming the
2410 		 * ioapic, I don't trust the Remote IRR bit to be
2411 		 * completely accurate.
2412 		 *
2413 		 * However there appears to be no other way to plug
2414 		 * this race, so if the Remote IRR bit is not
2415 		 * accurate and is causing problems then it is a hardware bug
2416 		 * and you can go talk to the chipset vendor about it.
2417 		 */
2418 		if (!io_apic_level_ack_pending(cfg))
2419 			irq_move_masked_irq(data);
2420 		unmask_ioapic(cfg);
2421 	}
2422 }
2423 #else
2424 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
2425 {
2426 	return false;
2427 }
2428 static inline void ioapic_irqd_unmask(struct irq_data *data,
2429 				      struct irq_cfg *cfg, bool masked)
2430 {
2431 }
2432 #endif
2433 
2434 static void ack_apic_level(struct irq_data *data)
2435 {
2436 	struct irq_cfg *cfg = data->chip_data;
2437 	int i, irq = data->irq;
2438 	unsigned long v;
2439 	bool masked;
2440 
2441 	irq_complete_move(cfg);
2442 	masked = ioapic_irqd_mask(data, cfg);
2443 
2444 	/*
2445 	 * It appears there is an erratum which affects at least version 0x11
2446 	 * of I/O APIC (that's the 82093AA and cores integrated into various
2447 	 * chipsets).  Under certain conditions a level-triggered interrupt is
2448 	 * erroneously delivered as an edge-triggered one but the respective IRR
2449 	 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
2450 	 * message but it will never arrive and further interrupts are blocked
2451 	 * from the source.  The exact reason is so far unknown, but the
2452 	 * phenomenon was observed when two consecutive interrupt requests
2453 	 * from a given source get delivered to the same CPU and the source is
2454 	 * temporarily disabled in between.
2455 	 *
2456 	 * A workaround is to simulate an EOI message manually.  We achieve it
2457 	 * by setting the trigger mode to edge and then to level when the edge
2458 	 * trigger mode gets detected in the TMR of a local APIC for a
2459 	 * level-triggered interrupt.  We mask the source for the time of the
2460 	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2461 	 * The idea is from Manfred Spraul.  --macro
2462 	 *
2463 	 * Also, when a cpu goes offline, fixup_irqs() will forward
2464 	 * any unhandled interrupt on the offlined cpu to the new cpu
2465 	 * destination that is handling the corresponding interrupt. This
2466 	 * interrupt forwarding is done via IPIs. Hence, in this case a
2467 	 * level-triggered io-apic interrupt will also be seen as an edge
2468 	 * interrupt in the IRR, and we can't rely on the cpu's EOI
2469 	 * being broadcast to the IO-APICs, which would clear the remote IRR
2470 	 * corresponding to the level-triggered interrupt. Hence on IO-APICs
2471 	 * supporting an EOI register, we do an explicit EOI to clear the
2472 	 * remote IRR, and on IO-APICs which don't have an EOI register,
2473 	 * we use the above logic (mask+edge followed by unmask+level) from
2474 	 * Manfred Spraul to clear the remote IRR.
2475 	 */
2476 	i = cfg->vector;
2477 	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2478 
2479 	/*
2480 	 * We must acknowledge the irq before we move it or the acknowledge will
2481 	 * not propagate properly.
2482 	 */
2483 	ack_APIC_irq();
2484 
2485 	/*
2486 	 * Tail end of clearing remote IRR bit (either by delivering the EOI
2487 	 * message via io-apic EOI register write or simulating it using
2488 	 * mask+edge followed by unmask+level logic) manually when the
2489 	 * level-triggered interrupt is seen as an edge-triggered interrupt
2490 	 * at the cpu.
2491 	 */
2492 	if (!(v & (1 << (i & 0x1f)))) {
2493 		atomic_inc(&irq_mis_count);
2494 
2495 		eoi_ioapic_irq(irq, cfg);
2496 	}
2497 
2498 	ioapic_irqd_unmask(data, cfg, masked);
2499 }
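
/*
 * Worked example (illustrative): the TMR offset computation above,
 * (i & ~0x1f) >> 1, is equivalent to (i / 32) * 0x10 -- clearing the low
 * five bits rounds i down to a multiple of 32, and halving that yields
 * the 0x10-byte register stride.  For vector 0x31: (0x31 & ~0x1f) >> 1
 * == 0x20 >> 1 == 0x10, i.e. the second TMR word, bit 0x31 % 32 == 17.
 */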
2500 
2501 static struct irq_chip ioapic_chip __read_mostly = {
2502 	.name			= "IO-APIC",
2503 	.irq_startup		= startup_ioapic_irq,
2504 	.irq_mask		= mask_ioapic_irq,
2505 	.irq_unmask		= unmask_ioapic_irq,
2506 	.irq_ack		= ack_apic_edge,
2507 	.irq_eoi		= ack_apic_level,
2508 	.irq_set_affinity	= native_ioapic_set_affinity,
2509 	.irq_retrigger		= ioapic_retrigger_irq,
2510 };
2511 
2512 static inline void init_IO_APIC_traps(void)
2513 {
2514 	struct irq_cfg *cfg;
2515 	unsigned int irq;
2516 
2517 	for_each_active_irq(irq) {
2518 		cfg = irq_cfg(irq);
2519 		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2520 			/*
2521 			 * Hmm.. We don't have an entry for this,
2522 			 * so default to an old-fashioned 8259
2523 			 * interrupt if we can..
2524 			 */
2525 			if (irq < legacy_pic->nr_legacy_irqs)
2526 				legacy_pic->make_irq(irq);
2527 			else
2528 				/* Strange. Oh, well.. */
2529 				irq_set_chip(irq, &no_irq_chip);
2530 		}
2531 	}
2532 }
2533 
2534 /*
2535  * The local APIC irq-chip implementation:
2536  */
2537 
2538 static void mask_lapic_irq(struct irq_data *data)
2539 {
2540 	unsigned long v;
2541 
2542 	v = apic_read(APIC_LVT0);
2543 	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2544 }
2545 
2546 static void unmask_lapic_irq(struct irq_data *data)
2547 {
2548 	unsigned long v;
2549 
2550 	v = apic_read(APIC_LVT0);
2551 	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2552 }
2553 
2554 static void ack_lapic_irq(struct irq_data *data)
2555 {
2556 	ack_APIC_irq();
2557 }
2558 
2559 static struct irq_chip lapic_chip __read_mostly = {
2560 	.name		= "local-APIC",
2561 	.irq_mask	= mask_lapic_irq,
2562 	.irq_unmask	= unmask_lapic_irq,
2563 	.irq_ack	= ack_lapic_irq,
2564 };
2565 
2566 static void lapic_register_intr(int irq)
2567 {
2568 	irq_clear_status_flags(irq, IRQ_LEVEL);
2569 	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2570 				      "edge");
2571 }
2572 
2573 /*
2574  * This looks a bit hackish but it's about the only way of sending
2575  * a few INTA cycles to 8259As and any associated glue logic.  ICR does
2576  * not support the ExtINT mode, unfortunately.  We need to send these
2577  * cycles as some i82489DX-based boards have glue logic that keeps the
2578  * 8259A interrupt line asserted until INTA.  --macro
2579  */
2580 static inline void __init unlock_ExtINT_logic(void)
2581 {
2582 	int apic, pin, i;
2583 	struct IO_APIC_route_entry entry0, entry1;
2584 	unsigned char save_control, save_freq_select;
2585 
2586 	pin  = find_isa_irq_pin(8, mp_INT);
2587 	if (pin == -1) {
2588 		WARN_ON_ONCE(1);
2589 		return;
2590 	}
2591 	apic = find_isa_irq_apic(8, mp_INT);
2592 	if (apic == -1) {
2593 		WARN_ON_ONCE(1);
2594 		return;
2595 	}
2596 
2597 	entry0 = ioapic_read_entry(apic, pin);
2598 	clear_IO_APIC_pin(apic, pin);
2599 
2600 	memset(&entry1, 0, sizeof(entry1));
2601 
2602 	entry1.dest_mode = 0;			/* physical delivery */
2603 	entry1.mask = 0;			/* unmask IRQ now */
2604 	entry1.dest = hard_smp_processor_id();
2605 	entry1.delivery_mode = dest_ExtINT;
2606 	entry1.polarity = entry0.polarity;
2607 	entry1.trigger = 0;
2608 	entry1.vector = 0;
2609 
2610 	ioapic_write_entry(apic, pin, entry1);
2611 
2612 	save_control = CMOS_READ(RTC_CONTROL);
2613 	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2614 	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2615 		   RTC_FREQ_SELECT);
2616 	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2617 
2618 	i = 100;
2619 	while (i-- > 0) {
2620 		mdelay(10);
2621 		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2622 			i -= 10;
2623 	}
2624 
2625 	CMOS_WRITE(save_control, RTC_CONTROL);
2626 	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2627 	clear_IO_APIC_pin(apic, pin);
2628 
2629 	ioapic_write_entry(apic, pin, entry0);
2630 }
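
/*
 * Note (illustrative; rate values per the MC146818 datasheet): the rate
 * select value 0x6 programs the RTC for 1024Hz periodic interrupts and
 * RTC_PIE enables them, so the polling loop above sees RTC_PF set about
 * once per millisecond.  Each hit shortens the 100 x 10ms timeout, and
 * every periodic interrupt delivered through the ExtINT entry generates
 * the INTA cycle needed to release the stuck 8259A line.
 */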
2631 
2632 static int disable_timer_pin_1 __initdata;
2633 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2634 static int __init disable_timer_pin_setup(char *arg)
2635 {
2636 	disable_timer_pin_1 = 1;
2637 	return 0;
2638 }
2639 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2640 
2641 /*
2642  * This code may look a bit paranoid, but it's supposed to cooperate with
2643  * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
2644  * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
2645  * fanatically on his truly buggy board.
2646  *
2647  * FIXME: really need to revamp this for all platforms.
2648  */
2649 static inline void __init check_timer(void)
2650 {
2651 	struct irq_cfg *cfg = irq_cfg(0);
2652 	int node = cpu_to_node(0);
2653 	int apic1, pin1, apic2, pin2;
2654 	unsigned long flags;
2655 	int no_pin1 = 0;
2656 
2657 	local_irq_save(flags);
2658 
2659 	/*
2660 	 * get/set the timer IRQ vector:
2661 	 */
2662 	legacy_pic->mask(0);
2663 	assign_irq_vector(0, cfg, apic->target_cpus());
2664 
2665 	/*
2666 	 * As IRQ0 is to be enabled in the 8259A, the virtual
2667 	 * wire has to be disabled in the local APIC.  Also
2668 	 * timer interrupts need to be acknowledged manually in
2669 	 * the 8259A for the i82489DX when using the NMI
2670 	 * watchdog as that APIC treats NMIs as level-triggered.
2671 	 * The AEOI mode will finish them in the 8259A
2672 	 * automatically.
2673 	 */
2674 	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2675 	legacy_pic->init(1);
2676 
2677 	pin1  = find_isa_irq_pin(0, mp_INT);
2678 	apic1 = find_isa_irq_apic(0, mp_INT);
2679 	pin2  = ioapic_i8259.pin;
2680 	apic2 = ioapic_i8259.apic;
2681 
2682 	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2683 		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2684 		    cfg->vector, apic1, pin1, apic2, pin2);
2685 
2686 	/*
2687 	 * Some BIOS writers are clueless and report the ExtINTA
2688 	 * I/O APIC input from the cascaded 8259A as the timer
2689 	 * interrupt input.  So just in case, if only one pin
2690 	 * was found above, try it both directly and through the
2691 	 * 8259A.
2692 	 */
2693 	if (pin1 == -1) {
2694 		panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
2695 		pin1 = pin2;
2696 		apic1 = apic2;
2697 		no_pin1 = 1;
2698 	} else if (pin2 == -1) {
2699 		pin2 = pin1;
2700 		apic2 = apic1;
2701 	}
2702 
2703 	if (pin1 != -1) {
2704 		/*
2705 		 * Ok, does IRQ0 through the IOAPIC work?
2706 		 */
2707 		if (no_pin1) {
2708 			add_pin_to_irq_node(cfg, node, apic1, pin1);
2709 			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2710 		} else {
2711 			/* For an edge trigger, setup_ioapic_irq already
2712 			 * leaves it unmasked, so we only need to unmask
2713 			 * it if it is level-triggered. Do we really have
2714 			 * a level-triggered timer?
2715 			 */
2716 			int idx;
2717 			idx = find_irq_entry(apic1, pin1, mp_INT);
2718 			if (idx != -1 && irq_trigger(idx))
2719 				unmask_ioapic(cfg);
2720 		}
2721 		if (timer_irq_works()) {
2722 			if (disable_timer_pin_1 > 0)
2723 				clear_IO_APIC_pin(0, pin1);
2724 			goto out;
2725 		}
2726 		panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
2727 		local_irq_disable();
2728 		clear_IO_APIC_pin(apic1, pin1);
2729 		if (!no_pin1)
2730 			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2731 				    "8254 timer not connected to IO-APIC\n");
2732 
2733 		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2734 			    "(IRQ0) through the 8259A ...\n");
2735 		apic_printk(APIC_QUIET, KERN_INFO
2736 			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
2737 		/*
2738 		 * legacy devices should be connected to IO APIC #0
2739 		 */
2740 		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
2741 		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2742 		legacy_pic->unmask(0);
2743 		if (timer_irq_works()) {
2744 			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2745 			goto out;
2746 		}
2747 		/*
2748 		 * Cleanup, just in case ...
2749 		 */
2750 		local_irq_disable();
2751 		legacy_pic->mask(0);
2752 		clear_IO_APIC_pin(apic2, pin2);
2753 		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2754 	}
2755 
2756 	apic_printk(APIC_QUIET, KERN_INFO
2757 		    "...trying to set up timer as Virtual Wire IRQ...\n");
2758 
2759 	lapic_register_intr(0);
2760 	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
2761 	legacy_pic->unmask(0);
2762 
2763 	if (timer_irq_works()) {
2764 		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2765 		goto out;
2766 	}
2767 	local_irq_disable();
2768 	legacy_pic->mask(0);
2769 	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2770 	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2771 
2772 	apic_printk(APIC_QUIET, KERN_INFO
2773 		    "...trying to set up timer as ExtINT IRQ...\n");
2774 
2775 	legacy_pic->init(0);
2776 	legacy_pic->make_irq(0);
2777 	apic_write(APIC_LVT0, APIC_DM_EXTINT);
2778 
2779 	unlock_ExtINT_logic();
2780 
2781 	if (timer_irq_works()) {
2782 		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2783 		goto out;
2784 	}
2785 	local_irq_disable();
2786 	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2787 	if (x2apic_preenabled)
2788 		apic_printk(APIC_QUIET, KERN_INFO
2789 			    "Perhaps problem with the pre-enabled x2apic mode\n"
2790 			    "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
2791 	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
2792 		"report.  Then try booting with the 'noapic' option.\n");
2793 out:
2794 	local_irq_restore(flags);
2795 }
2796 
2797 /*
2798  * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2799  * to devices.  However there may be an I/O APIC pin available for
2800  * this interrupt regardless.  The pin may be left unconnected, but
2801  * typically it will be reused as an ExtINT cascade interrupt for
2802  * the master 8259A.  In the MPS case such a pin will normally be
2803  * reported as an ExtINT interrupt in the MP table.  With ACPI
2804  * there is no provision for ExtINT interrupts, and in the absence
2805  * of an override it would be treated as an ordinary ISA I/O APIC
2806  * interrupt, that is edge-triggered and unmasked by default.  We
2807  * used to do this, but it caused problems on some systems because
2808  * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2809  * the same ExtINT cascade interrupt to drive the local APIC of the
2810  * bootstrap processor.  Therefore we refrain from routing IRQ2 to
2811  * the I/O APIC in all cases now.  No actual device should request
2812  * it anyway.  --macro
2813  */
2814 #define PIC_IRQS	(1UL << PIC_CASCADE_IR)
2815 
2816 void __init setup_IO_APIC(void)
2817 {
2818 
2819 	/*
2820 	 * Calling enable_IO_APIC() has been moved to setup_local_APIC() for the BP.
2821 	 */
2822 	io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
2823 
2824 	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2825 	/*
2826 	 * Set up IO-APIC IRQ routing.
2827 	 */
2828 	x86_init.mpparse.setup_ioapic_ids();
2829 
2830 	sync_Arb_IDs();
2831 	setup_IO_APIC_irqs();
2832 	init_IO_APIC_traps();
2833 	if (legacy_pic->nr_legacy_irqs)
2834 		check_timer();
2835 }
2836 
2837 /*
2838  *      Called after all the initialization is done. If we didn't find any
2839  *      APIC bugs then we can allow the modify fast path.
2840  */
2841 
2842 static int __init io_apic_bug_finalize(void)
2843 {
2844 	if (sis_apic_bug == -1)
2845 		sis_apic_bug = 0;
2846 	return 0;
2847 }
2848 
2849 late_initcall(io_apic_bug_finalize);
2850 
2851 static void resume_ioapic_id(int ioapic_idx)
2852 {
2853 	unsigned long flags;
2854 	union IO_APIC_reg_00 reg_00;
2855 
2856 	raw_spin_lock_irqsave(&ioapic_lock, flags);
2857 	reg_00.raw = io_apic_read(ioapic_idx, 0);
2858 	if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
2859 		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
2860 		io_apic_write(ioapic_idx, 0, reg_00.raw);
2861 	}
2862 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2863 }
2864 
2865 static void ioapic_resume(void)
2866 {
2867 	int ioapic_idx;
2868 
2869 	for_each_ioapic_reverse(ioapic_idx)
2870 		resume_ioapic_id(ioapic_idx);
2871 
2872 	restore_ioapic_entries();
2873 }
2874 
2875 static struct syscore_ops ioapic_syscore_ops = {
2876 	.suspend = save_ioapic_entries,
2877 	.resume = ioapic_resume,
2878 };
2879 
2880 static int __init ioapic_init_ops(void)
2881 {
2882 	register_syscore_ops(&ioapic_syscore_ops);
2883 
2884 	return 0;
2885 }
2886 
2887 device_initcall(ioapic_init_ops);
2888 
2889 /*
2890  * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
2891  */
2892 int arch_setup_hwirq(unsigned int irq, int node)
2893 {
2894 	struct irq_cfg *cfg;
2895 	unsigned long flags;
2896 	int ret;
2897 
2898 	cfg = alloc_irq_cfg(irq, node);
2899 	if (!cfg)
2900 		return -ENOMEM;
2901 
2902 	raw_spin_lock_irqsave(&vector_lock, flags);
2903 	ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
2904 	raw_spin_unlock_irqrestore(&vector_lock, flags);
2905 
2906 	if (!ret)
2907 		irq_set_chip_data(irq, cfg);
2908 	else
2909 		free_irq_cfg(irq, cfg);
2910 	return ret;
2911 }
2912 
2913 void arch_teardown_hwirq(unsigned int irq)
2914 {
2915 	struct irq_cfg *cfg = irq_cfg(irq);
2916 	unsigned long flags;
2917 
2918 	free_remapped_irq(irq);
2919 	raw_spin_lock_irqsave(&vector_lock, flags);
2920 	__clear_irq_vector(irq, cfg);
2921 	raw_spin_unlock_irqrestore(&vector_lock, flags);
2922 	free_irq_cfg(irq, cfg);
2923 }
2924 
2925 /*
2926  * MSI message composition
2927  */
2928 void native_compose_msi_msg(struct pci_dev *pdev,
2929 			    unsigned int irq, unsigned int dest,
2930 			    struct msi_msg *msg, u8 hpet_id)
2931 {
2932 	struct irq_cfg *cfg = irq_cfg(irq);
2933 
2934 	msg->address_hi = MSI_ADDR_BASE_HI;
2935 
2936 	if (x2apic_enabled())
2937 		msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
2938 
2939 	msg->address_lo =
2940 		MSI_ADDR_BASE_LO |
2941 		((apic->irq_dest_mode == 0) ?
2942 			MSI_ADDR_DEST_MODE_PHYSICAL:
2943 			MSI_ADDR_DEST_MODE_LOGICAL) |
2944 		((apic->irq_delivery_mode != dest_LowestPrio) ?
2945 			MSI_ADDR_REDIRECTION_CPU:
2946 			MSI_ADDR_REDIRECTION_LOWPRI) |
2947 		MSI_ADDR_DEST_ID(dest);
2948 
2949 	msg->data =
2950 		MSI_DATA_TRIGGER_EDGE |
2951 		MSI_DATA_LEVEL_ASSERT |
2952 		((apic->irq_delivery_mode != dest_LowestPrio) ?
2953 			MSI_DATA_DELIVERY_FIXED:
2954 			MSI_DATA_DELIVERY_LOWPRI) |
2955 		MSI_DATA_VECTOR(cfg->vector);
2956 }
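
/*
 * Worked example (values are illustrative, not taken from this file):
 * for physical destination mode and fixed delivery, destination APIC ID 3
 * and vector 0x41, the routine above composes
 *
 *	address_hi = MSI_ADDR_BASE_HI
 *	address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_MODE_PHYSICAL |
 *		     MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID(3)
 *	data       = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
 *		     MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(0x41)
 *
 * i.e. a write into the 0xFEE00000 message address window with the
 * destination ID in address bits 12-19 and the vector in the low byte
 * of the data.
 */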
2957 
2958 #ifdef CONFIG_PCI_MSI
2959 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
2960 			   struct msi_msg *msg, u8 hpet_id)
2961 {
2962 	struct irq_cfg *cfg;
2963 	int err;
2964 	unsigned dest;
2965 
2966 	if (disable_apic)
2967 		return -ENXIO;
2968 
2969 	cfg = irq_cfg(irq);
2970 	err = assign_irq_vector(irq, cfg, apic->target_cpus());
2971 	if (err)
2972 		return err;
2973 
2974 	err = apic->cpu_mask_to_apicid_and(cfg->domain,
2975 					   apic->target_cpus(), &dest);
2976 	if (err)
2977 		return err;
2978 
2979 	x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
2980 
2981 	return 0;
2982 }
2983 
2984 static int
2985 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
2986 {
2987 	struct irq_cfg *cfg = data->chip_data;
2988 	struct msi_msg msg;
2989 	unsigned int dest;
2990 	int ret;
2991 
2992 	ret = __ioapic_set_affinity(data, mask, &dest);
2993 	if (ret)
2994 		return ret;
2995 
2996 	__get_cached_msi_msg(data->msi_desc, &msg);
2997 
2998 	msg.data &= ~MSI_DATA_VECTOR_MASK;
2999 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
3000 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3001 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3002 
3003 	__write_msi_msg(data->msi_desc, &msg);
3004 
3005 	return IRQ_SET_MASK_OK_NOCOPY;
3006 }
3007 
3008 /*
3009  * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3010  * which implement the MSI or MSI-X Capability Structure.
3011  */
3012 static struct irq_chip msi_chip = {
3013 	.name			= "PCI-MSI",
3014 	.irq_unmask		= unmask_msi_irq,
3015 	.irq_mask		= mask_msi_irq,
3016 	.irq_ack		= ack_apic_edge,
3017 	.irq_set_affinity	= msi_set_affinity,
3018 	.irq_retrigger		= ioapic_retrigger_irq,
3019 };
3020 
3021 int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
3022 		  unsigned int irq_base, unsigned int irq_offset)
3023 {
3024 	struct irq_chip *chip = &msi_chip;
3025 	struct msi_msg msg;
3026 	unsigned int irq = irq_base + irq_offset;
3027 	int ret;
3028 
3029 	ret = msi_compose_msg(dev, irq, &msg, -1);
3030 	if (ret < 0)
3031 		return ret;
3032 
3033 	irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
3034 
3035 	/*
3036 	 * An MSI-X message is written per-IRQ; the offset is always 0.
3037 	 * An MSI message denotes a contiguous group of IRQs and is written for the 0th IRQ.
3038 	 */
3039 	if (!irq_offset)
3040 		write_msi_msg(irq, &msg);
3041 
3042 	setup_remapped_irq(irq, irq_cfg(irq), chip);
3043 
3044 	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
3045 
3046 	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
3047 
3048 	return 0;
3049 }
3050 
3051 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3052 {
3053 	struct msi_desc *msidesc;
3054 	unsigned int irq;
3055 	int node, ret;
3056 
3057 	/* Multiple MSI vectors only supported with interrupt remapping */
3058 	if (type == PCI_CAP_ID_MSI && nvec > 1)
3059 		return 1;
3060 
3061 	node = dev_to_node(&dev->dev);
3062 
3063 	list_for_each_entry(msidesc, &dev->msi_list, list) {
3064 		irq = irq_alloc_hwirq(node);
3065 		if (!irq)
3066 			return -ENOSPC;
3067 
3068 		ret = setup_msi_irq(dev, msidesc, irq, 0);
3069 		if (ret < 0) {
3070 			irq_free_hwirq(irq);
3071 			return ret;
3072 		}
3073 
3074 	}
3075 	return 0;
3076 }
3077 
3078 void native_teardown_msi_irq(unsigned int irq)
3079 {
3080 	irq_free_hwirq(irq);
3081 }
3082 
3083 #ifdef CONFIG_DMAR_TABLE
3084 static int
3085 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3086 		      bool force)
3087 {
3088 	struct irq_cfg *cfg = data->chip_data;
3089 	unsigned int dest, irq = data->irq;
3090 	struct msi_msg msg;
3091 	int ret;
3092 
3093 	ret = __ioapic_set_affinity(data, mask, &dest);
3094 	if (ret)
3095 		return ret;
3096 
3097 	dmar_msi_read(irq, &msg);
3098 
3099 	msg.data &= ~MSI_DATA_VECTOR_MASK;
3100 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
3101 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3102 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3103 	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
3104 
3105 	dmar_msi_write(irq, &msg);
3106 
3107 	return IRQ_SET_MASK_OK_NOCOPY;
3108 }
3109 
3110 static struct irq_chip dmar_msi_type = {
3111 	.name			= "DMAR_MSI",
3112 	.irq_unmask		= dmar_msi_unmask,
3113 	.irq_mask		= dmar_msi_mask,
3114 	.irq_ack		= ack_apic_edge,
3115 	.irq_set_affinity	= dmar_msi_set_affinity,
3116 	.irq_retrigger		= ioapic_retrigger_irq,
3117 };
3118 
3119 int arch_setup_dmar_msi(unsigned int irq)
3120 {
3121 	int ret;
3122 	struct msi_msg msg;
3123 
3124 	ret = msi_compose_msg(NULL, irq, &msg, -1);
3125 	if (ret < 0)
3126 		return ret;
3127 	dmar_msi_write(irq, &msg);
3128 	irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3129 				      "edge");
3130 	return 0;
3131 }
3132 #endif
3133 
3134 #ifdef CONFIG_HPET_TIMER
3135 
3136 static int hpet_msi_set_affinity(struct irq_data *data,
3137 				 const struct cpumask *mask, bool force)
3138 {
3139 	struct irq_cfg *cfg = data->chip_data;
3140 	struct msi_msg msg;
3141 	unsigned int dest;
3142 	int ret;
3143 
3144 	ret = __ioapic_set_affinity(data, mask, &dest);
3145 	if (ret)
3146 		return ret;
3147 
3148 	hpet_msi_read(data->handler_data, &msg);
3149 
3150 	msg.data &= ~MSI_DATA_VECTOR_MASK;
3151 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
3152 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3153 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3154 
3155 	hpet_msi_write(data->handler_data, &msg);
3156 
3157 	return IRQ_SET_MASK_OK_NOCOPY;
3158 }
3159 
3160 static struct irq_chip hpet_msi_type = {
3161 	.name = "HPET_MSI",
3162 	.irq_unmask = hpet_msi_unmask,
3163 	.irq_mask = hpet_msi_mask,
3164 	.irq_ack = ack_apic_edge,
3165 	.irq_set_affinity = hpet_msi_set_affinity,
3166 	.irq_retrigger = ioapic_retrigger_irq,
3167 };
3168 
3169 int default_setup_hpet_msi(unsigned int irq, unsigned int id)
3170 {
3171 	struct irq_chip *chip = &hpet_msi_type;
3172 	struct msi_msg msg;
3173 	int ret;
3174 
3175 	ret = msi_compose_msg(NULL, irq, &msg, id);
3176 	if (ret < 0)
3177 		return ret;
3178 
3179 	hpet_msi_write(irq_get_handler_data(irq), &msg);
3180 	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3181 	setup_remapped_irq(irq, irq_cfg(irq), chip);
3182 
3183 	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
3184 	return 0;
3185 }
3186 #endif
3187 
3188 #endif /* CONFIG_PCI_MSI */
3189 /*
3190  * Hypertransport interrupt support
3191  */
3192 #ifdef CONFIG_HT_IRQ
3193 
3194 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3195 {
3196 	struct ht_irq_msg msg;
3197 	fetch_ht_irq_msg(irq, &msg);
3198 
3199 	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3200 	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3201 
3202 	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3203 	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3204 
3205 	write_ht_irq_msg(irq, &msg);
3206 }
3207 
3208 static int
3209 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3210 {
3211 	struct irq_cfg *cfg = data->chip_data;
3212 	unsigned int dest;
3213 	int ret;
3214 
3215 	ret = __ioapic_set_affinity(data, mask, &dest);
3216 	if (ret)
3217 		return ret;
3218 
3219 	target_ht_irq(data->irq, dest, cfg->vector);
3220 	return IRQ_SET_MASK_OK_NOCOPY;
3221 }
3222 
3223 static struct irq_chip ht_irq_chip = {
3224 	.name			= "PCI-HT",
3225 	.irq_mask		= mask_ht_irq,
3226 	.irq_unmask		= unmask_ht_irq,
3227 	.irq_ack		= ack_apic_edge,
3228 	.irq_set_affinity	= ht_set_affinity,
3229 	.irq_retrigger		= ioapic_retrigger_irq,
3230 };
3231 
3232 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3233 {
3234 	struct irq_cfg *cfg;
3235 	struct ht_irq_msg msg;
3236 	unsigned dest;
3237 	int err;
3238 
3239 	if (disable_apic)
3240 		return -ENXIO;
3241 
3242 	cfg = irq_cfg(irq);
3243 	err = assign_irq_vector(irq, cfg, apic->target_cpus());
3244 	if (err)
3245 		return err;
3246 
3247 	err = apic->cpu_mask_to_apicid_and(cfg->domain,
3248 					   apic->target_cpus(), &dest);
3249 	if (err)
3250 		return err;
3251 
3252 	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3253 
3254 	msg.address_lo =
3255 		HT_IRQ_LOW_BASE |
3256 		HT_IRQ_LOW_DEST_ID(dest) |
3257 		HT_IRQ_LOW_VECTOR(cfg->vector) |
3258 		((apic->irq_dest_mode == 0) ?
3259 			HT_IRQ_LOW_DM_PHYSICAL :
3260 			HT_IRQ_LOW_DM_LOGICAL) |
3261 		HT_IRQ_LOW_RQEOI_EDGE |
3262 		((apic->irq_delivery_mode != dest_LowestPrio) ?
3263 			HT_IRQ_LOW_MT_FIXED :
3264 			HT_IRQ_LOW_MT_ARBITRATED) |
3265 		HT_IRQ_LOW_IRQ_MASKED;
3266 
3267 	write_ht_irq_msg(irq, &msg);
3268 
3269 	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
3270 				      handle_edge_irq, "edge");
3271 
3272 	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3273 
3274 	return 0;
3275 }
3276 #endif /* CONFIG_HT_IRQ */
3277 
3278 static int
3279 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
3280 {
3281 	struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
3282 	int ret;
3283 
3284 	if (!cfg)
3285 		return -EINVAL;
3286 	ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
3287 	if (!ret)
3288 		setup_ioapic_irq(irq, cfg, attr);
3289 	return ret;
3290 }
3291 
3292 int io_apic_setup_irq_pin_once(unsigned int irq, int node,
3293 			       struct io_apic_irq_attr *attr)
3294 {
3295 	unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin;
3296 	int ret;
3297 	struct IO_APIC_route_entry orig_entry;
3298 
3299 	/* Avoid redundant programming */
3300 	if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) {
3301 		pr_debug("Pin %d-%d already programmed\n", mpc_ioapic_id(ioapic_idx), pin);
3302 		orig_entry = ioapic_read_entry(attr->ioapic, pin);
3303 		if (attr->trigger == orig_entry.trigger && attr->polarity == orig_entry.polarity)
3304 			return 0;
3305 		return -EBUSY;
3306 	}
3307 	ret = io_apic_setup_irq_pin(irq, node, attr);
3308 	if (!ret)
3309 		set_bit(pin, ioapics[ioapic_idx].pin_programmed);
3310 	return ret;
3311 }
3312 
3313 static int __init io_apic_get_redir_entries(int ioapic)
3314 {
3315 	union IO_APIC_reg_01	reg_01;
3316 	unsigned long flags;
3317 
3318 	raw_spin_lock_irqsave(&ioapic_lock, flags);
3319 	reg_01.raw = io_apic_read(ioapic, 1);
3320 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3321 
3322 	/* The register returns the maximum redirection entry index
3323 	 * supported, which is one less than the total number of redir
3324 	 * entries.
3325 	 */
3326 	return reg_01.bits.entries + 1;
3327 }
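
/*
 * Example (illustrative): a classic 82093AA reports 0x17 in the entries
 * field of register 1, so the function above returns 0x17 + 1 == 24 pins.
 */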
3328 
3329 unsigned int arch_dynirq_lower_bound(unsigned int from)
3330 {
3331 	unsigned int min = gsi_top + NR_IRQS_LEGACY;
3332 
3333 	return from < min ? min : from;
3334 }
3335 
3336 int __init arch_probe_nr_irqs(void)
3337 {
3338 	int nr;
3339 
3340 	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3341 		nr_irqs = NR_VECTORS * nr_cpu_ids;
3342 
3343 	nr = (gsi_top + NR_IRQS_LEGACY) + 8 * nr_cpu_ids;
3344 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3345 	/*
3346 	 * For MSI and HT dynamic irqs
3347 	 */
3348 	nr += (gsi_top + NR_IRQS_LEGACY) * 16;
3349 #endif
3350 	if (nr < nr_irqs)
3351 		nr_irqs = nr;
3352 
3353 	return NR_IRQS_LEGACY;
3354 }
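
/*
 * Worked example (numbers are illustrative): with gsi_top = 24,
 * NR_IRQS_LEGACY = 16 and 8 possible cpus, the estimate above is
 * (24 + 16) + 8 * 8 = 104 irqs, plus (24 + 16) * 16 = 640 more when
 * MSI or HT is configured, capping nr_irqs at 744.
 */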
3355 
3356 int io_apic_set_pci_routing(struct device *dev, int irq,
3357 			    struct io_apic_irq_attr *irq_attr)
3358 {
3359 	int node;
3360 
3361 	if (!IO_APIC_IRQ(irq)) {
3362 		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3363 			    irq_attr->ioapic);
3364 		return -EINVAL;
3365 	}
3366 
3367 	node = dev ? dev_to_node(dev) : cpu_to_node(0);
3368 
3369 	return io_apic_setup_irq_pin_once(irq, node, irq_attr);
3370 }
3371 
3372 #ifdef CONFIG_X86_32
3373 static int __init io_apic_get_unique_id(int ioapic, int apic_id)
3374 {
3375 	union IO_APIC_reg_00 reg_00;
3376 	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3377 	physid_mask_t tmp;
3378 	unsigned long flags;
3379 	int i = 0;
3380 
3381 	/*
3382 	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3383 	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3384 	 * supported up to 16 on one shared APIC bus.
3385 	 *
3386 	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3387 	 *      advantage of new APIC bus architecture.
3388 	 */
3389 
3390 	if (physids_empty(apic_id_map))
3391 		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3392 
3393 	raw_spin_lock_irqsave(&ioapic_lock, flags);
3394 	reg_00.raw = io_apic_read(ioapic, 0);
3395 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3396 
3397 	if (apic_id >= get_physical_broadcast()) {
3398 		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3399 			"%d\n", ioapic, apic_id, reg_00.bits.ID);
3400 		apic_id = reg_00.bits.ID;
3401 	}
3402 
3403 	/*
3404 	 * Every APIC in a system must have a unique ID or we get lots of nice
3405 	 * 'stuck on smp_invalidate_needed IPI wait' messages.
3406 	 */
3407 	if (apic->check_apicid_used(&apic_id_map, apic_id)) {
3408 
3409 		for (i = 0; i < get_physical_broadcast(); i++) {
3410 			if (!apic->check_apicid_used(&apic_id_map, i))
3411 				break;
3412 		}
3413 
3414 		if (i == get_physical_broadcast())
3415 			panic("Max apic_id exceeded!\n");
3416 
3417 		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3418 			"trying %d\n", ioapic, apic_id, i);
3419 
3420 		apic_id = i;
3421 	}
3422 
3423 	apic->apicid_to_cpu_present(apic_id, &tmp);
3424 	physids_or(apic_id_map, apic_id_map, tmp);
3425 
3426 	if (reg_00.bits.ID != apic_id) {
3427 		reg_00.bits.ID = apic_id;
3428 
3429 		raw_spin_lock_irqsave(&ioapic_lock, flags);
3430 		io_apic_write(ioapic, 0, reg_00.raw);
3431 		reg_00.raw = io_apic_read(ioapic, 0);
3432 		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3433 
3434 		/* Sanity check */
3435 		if (reg_00.bits.ID != apic_id) {
3436 			pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
3437 			       ioapic);
3438 			return -1;
3439 		}
3440 	}
3441 
3442 	apic_printk(APIC_VERBOSE, KERN_INFO
3443 			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3444 
3445 	return apic_id;
3446 }
3447 
3448 static u8 __init io_apic_unique_id(u8 id)
3449 {
3450 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3451 	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3452 		return io_apic_get_unique_id(nr_ioapics, id);
3453 	else
3454 		return id;
3455 }
3456 #else
3457 static u8 __init io_apic_unique_id(u8 id)
3458 {
3459 	int i;
3460 	DECLARE_BITMAP(used, 256);
3461 
3462 	bitmap_zero(used, 256);
3463 	for_each_ioapic(i)
3464 		__set_bit(mpc_ioapic_id(i), used);
3465 	if (!test_bit(id, used))
3466 		return id;
3467 	return find_first_zero_bit(used, 256);
3468 }
3469 #endif
3470 
3471 static int __init io_apic_get_version(int ioapic)
3472 {
3473 	union IO_APIC_reg_01	reg_01;
3474 	unsigned long flags;
3475 
3476 	raw_spin_lock_irqsave(&ioapic_lock, flags);
3477 	reg_01.raw = io_apic_read(ioapic, 1);
3478 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3479 
3480 	return reg_01.bits.version;
3481 }
3482 
3483 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
3484 {
3485 	int ioapic, pin, idx;
3486 
3487 	if (skip_ioapic_setup)
3488 		return -1;
3489 
3490 	ioapic = mp_find_ioapic(gsi);
3491 	if (ioapic < 0)
3492 		return -1;
3493 
3494 	pin = mp_find_ioapic_pin(ioapic, gsi);
3495 	if (pin < 0)
3496 		return -1;
3497 
3498 	idx = find_irq_entry(ioapic, pin, mp_INT);
3499 	if (idx < 0)
3500 		return -1;
3501 
3502 	*trigger = irq_trigger(idx);
3503 	*polarity = irq_polarity(idx);
3504 	return 0;
3505 }
3506 
3507 /*
3508  * This function currently is only a helper for the i386 smp boot process, where
3509  * we need to reprogram the ioredtbls to cater for the cpus which have come
3510  * online, so the mask in all cases should simply be apic->target_cpus().
3511  */
3512 #ifdef CONFIG_SMP
3513 void __init setup_ioapic_dest(void)
3514 {
3515 	int pin, ioapic, irq, irq_entry;
3516 	const struct cpumask *mask;
3517 	struct irq_data *idata;
3518 
3519 	if (skip_ioapic_setup == 1)
3520 		return;
3521 
3522 	for_each_ioapic_pin(ioapic, pin) {
3523 		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3524 		if (irq_entry == -1)
3525 			continue;
3526 		irq = pin_2_irq(irq_entry, ioapic, pin);
3527 
3528 		if ((ioapic > 0) && (irq > NR_IRQS_LEGACY))
3529 			continue;
3530 
3531 		idata = irq_get_irq_data(irq);
3532 
3533 		/*
3534 		 * Honour affinities which have been set in early boot
3535 		 */
3536 		if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
3537 			mask = idata->affinity;
3538 		else
3539 			mask = apic->target_cpus();
3540 
3541 		x86_io_apic_ops.set_affinity(idata, mask, false);
3542 	}
3543 
3544 }
3545 #endif
3546 
3547 #define IOAPIC_RESOURCE_NAME_SIZE 11
3548 
3549 static struct resource *ioapic_resources;
3550 
3551 static struct resource * __init ioapic_setup_resources(void)
3552 {
3553 	unsigned long n;
3554 	struct resource *res;
3555 	char *mem;
3556 	int i, num = 0;
3557 
3558 	for_each_ioapic(i)
3559 		num++;
3560 	if (num == 0)
3561 		return NULL;
3562 
3563 	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3564 	n *= num;
3565 
3566 	mem = alloc_bootmem(n);
3567 	res = (void *)mem;
3568 
3569 	mem += sizeof(struct resource) * num;
3570 
3571 	num = 0;
3572 	for_each_ioapic(i) {
3573 		res[num].name = mem;
3574 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3575 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
3576 		mem += IOAPIC_RESOURCE_NAME_SIZE;
3577 		num++;
3578 	}
3579 
3580 	ioapic_resources = res;
3581 
3582 	return res;
3583 }
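
/*
 * Layout note (illustrative): the single bootmem allocation above packs
 * the resource array first and the name strings behind it,
 *
 *	res[0] .. res[num-1] | "IOAPIC 0\0" | "IOAPIC 1\0" | ...
 *
 * with each name slot IOAPIC_RESOURCE_NAME_SIZE bytes wide, which is why
 * mem is advanced past the resource array before the names are written.
 */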
3584 
3585 void __init native_io_apic_init_mappings(void)
3586 {
3587 	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3588 	struct resource *ioapic_res;
3589 	int i;
3590 
3591 	ioapic_res = ioapic_setup_resources();
3592 	for_each_ioapic(i) {
3593 		if (smp_found_config) {
3594 			ioapic_phys = mpc_ioapic_addr(i);
3595 #ifdef CONFIG_X86_32
3596 			if (!ioapic_phys) {
3597 				printk(KERN_ERR
3598 				       "WARNING: bogus zero IO-APIC "
3599 				       "address found in MPTABLE, "
3600 				       "disabling IO/APIC support!\n");
3601 				smp_found_config = 0;
3602 				skip_ioapic_setup = 1;
3603 				goto fake_ioapic_page;
3604 			}
3605 #endif
3606 		} else {
3607 #ifdef CONFIG_X86_32
3608 fake_ioapic_page:
3609 #endif
3610 			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
3611 			ioapic_phys = __pa(ioapic_phys);
3612 		}
3613 		set_fixmap_nocache(idx, ioapic_phys);
3614 		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
3615 			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
3616 			ioapic_phys);
3617 		idx++;
3618 
3619 		ioapic_res->start = ioapic_phys;
3620 		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
3621 		ioapic_res++;
3622 	}
3623 }
3624 
3625 void __init ioapic_insert_resources(void)
3626 {
3627 	int i;
3628 	struct resource *r = ioapic_resources;
3629 
3630 	if (!r) {
3631 		if (nr_ioapics > 0)
3632 			printk(KERN_ERR
3633 				"IO APIC resources couldn't be allocated.\n");
3634 		return;
3635 	}
3636 
3637 	for_each_ioapic(i) {
3638 		insert_resource(&iomem_resource, r);
3639 		r++;
3640 	}
3641 }
3642 
3643 int mp_find_ioapic(u32 gsi)
3644 {
3645 	int i;
3646 
3647 	if (nr_ioapics == 0)
3648 		return -1;
3649 
3650 	/* Find the IOAPIC that manages this GSI. */
3651 	for_each_ioapic(i) {
3652 		struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
3653 		if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end)
3654 			return i;
3655 	}
3656 
3657 	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %u\n", gsi);
3658 	return -1;
3659 }
3660 
3661 int mp_find_ioapic_pin(int ioapic, u32 gsi)
3662 {
3663 	struct mp_ioapic_gsi *gsi_cfg;
3664 
3665 	if (WARN_ON(ioapic < 0))
3666 		return -1;
3667 
3668 	gsi_cfg = mp_ioapic_gsi_routing(ioapic);
3669 	if (WARN_ON(gsi > gsi_cfg->gsi_end))
3670 		return -1;
3671 
3672 	return gsi - gsi_cfg->gsi_base;
3673 }
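
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * resolving a GSI to an (ioapic, pin) pair with the two lookups above.
 * With a single 24-pin IOAPIC at gsi_base 0, GSI 19 resolves to
 * ioapic 0, pin 19 - 0 = 19.
 */
static int __maybe_unused mp_gsi_to_pin(u32 gsi)
{
	int ioapic = mp_find_ioapic(gsi);

	if (ioapic < 0)
		return -1;
	return mp_find_ioapic_pin(ioapic, gsi);
}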
3674 
3675 static __init int bad_ioapic(unsigned long address)
3676 {
3677 	if (nr_ioapics >= MAX_IO_APICS) {
3678 		pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
3679 			MAX_IO_APICS, nr_ioapics);
3680 		return 1;
3681 	}
3682 	if (!address) {
3683 		pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
3684 		return 1;
3685 	}
3686 	return 0;
3687 }
3688 
3689 static __init int bad_ioapic_register(int idx)
3690 {
3691 	union IO_APIC_reg_00 reg_00;
3692 	union IO_APIC_reg_01 reg_01;
3693 	union IO_APIC_reg_02 reg_02;
3694 
3695 	reg_00.raw = io_apic_read(idx, 0);
3696 	reg_01.raw = io_apic_read(idx, 1);
3697 	reg_02.raw = io_apic_read(idx, 2);
3698 
3699 	if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
3700 		pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
3701 			mpc_ioapic_addr(idx));
3702 		return 1;
3703 	}
3704 
3705 	return 0;
3706 }
3707 
3708 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
3709 {
3710 	int idx = 0;
3711 	int entries;
3712 	struct mp_ioapic_gsi *gsi_cfg;
3713 
3714 	if (bad_ioapic(address))
3715 		return;
3716 
3717 	idx = nr_ioapics;
3718 
3719 	ioapics[idx].mp_config.type = MP_IOAPIC;
3720 	ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
3721 	ioapics[idx].mp_config.apicaddr = address;
3722 
3723 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
3724 
3725 	if (bad_ioapic_register(idx)) {
3726 		clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
3727 		return;
3728 	}
3729 
3730 	ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
3731 	ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
3732 
3733 	/*
3734 	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
3735 	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
3736 	 */
3737 	entries = io_apic_get_redir_entries(idx);
3738 	gsi_cfg = mp_ioapic_gsi_routing(idx);
3739 	gsi_cfg->gsi_base = gsi_base;
3740 	gsi_cfg->gsi_end = gsi_base + entries - 1;
3741 
3742 	/*
3743 	 * The number of IO-APIC IRQ registers (== #pins):
3744 	 */
3745 	ioapics[idx].nr_registers = entries;
3746 
3747 	if (gsi_cfg->gsi_end >= gsi_top)
3748 		gsi_top = gsi_cfg->gsi_end + 1;
3749 
3750 	pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
3751 		idx, mpc_ioapic_id(idx),
3752 		mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
3753 		gsi_cfg->gsi_base, gsi_cfg->gsi_end);
3754 
3755 	nr_ioapics++;
3756 }
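
/*
 * Example (illustrative): registering a 24-pin IOAPIC at gsi_base 0
 * yields gsi_end = 0 + 24 - 1 = 23 and pushes gsi_top to 24, so a
 * second IOAPIC would typically be registered at gsi_base 24.
 */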
3757 
3758 /* Enable IOAPIC early just for system timer */
3759 void __init pre_init_apic_IRQ0(void)
3760 {
3761 	struct io_apic_irq_attr attr = { 0, 0, 0, 0 };
3762 
3763 	printk(KERN_INFO "Early APIC setup for system timer0\n");
3764 #ifndef CONFIG_SMP
3765 	physid_set_mask_of_physid(boot_cpu_physical_apicid,
3766 					 &phys_cpu_present_map);
3767 #endif
3768 	setup_local_APIC();
3769 
3770 	io_apic_setup_irq_pin(0, 0, &attr);
3771 	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
3772 				      "edge");
3773 }
3774