xref: /openbmc/linux/arch/ia64/kernel/iosapic.c (revision a1e58bbd)
1 /*
2  * I/O SAPIC support.
3  *
4  * Copyright (C) 1999 Intel Corp.
5  * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
6  * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
7  * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
8  *	David Mosberger-Tang <davidm@hpl.hp.com>
9  * Copyright (C) 1999 VA Linux Systems
10  * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
11  *
12  * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
13  *				APIC code.  In particular, we now have separate
14  *				handlers for edge and level triggered
15  *				interrupts.
16  * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
17  *				allocation PCI to vector mapping, shared PCI
18  *				interrupts.
19  * 00/10/27	D. Mosberger	Document things a bit more to make them more
20  *				understandable.  Clean up much of the old
21  *				IOSAPIC cruft.
22  * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
23  *				and fixes for ACPI S5(SoftOff) support.
24  * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
25  * 02/01/07     E. Focht        <efocht@ess.nec.de> Redirectable interrupt
26  *				vectors in iosapic_set_affinity(),
27  *				initializations for /proc/irq/#/smp_affinity
28  * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
29  * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
30  * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
31  *				IOSAPIC mapping error
32  * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
33  * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
34  *				interrupt, vector, etc.)
35  * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
36  *				pci_irq code.
37  * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
38  *				Remove iosapic_address & gsi_base from
39  *				external interfaces.  Rationalize
40  *				__init/__devinit attributes.
41  * 04/12/04 Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
42  *				Updated to work with irq migration necessary
43  *				for CPU Hotplug
44  */
45 /*
46  * Here is what the interrupt logic between a PCI device and the kernel looks
47  * like:
48  *
49  * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
50  *     INTD).  The device is uniquely identified by its bus and slot number
51  *     (the function number does not matter here because all functions share
52  *     the same interrupt lines).
53  *
54  * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
55  *     controller.  Multiple interrupt lines may have to share the same
56  *     IOSAPIC pin (if they're level triggered and use the same polarity).
57  *     Each interrupt line has a unique Global System Interrupt (GSI) number
58  *     which can be calculated as the sum of the controller's base GSI number
59  *     and the IOSAPIC pin number to which the line connects.
60  *
61  * (3) The IOSAPIC uses internal routing table entries (RTEs) to map an
62  *     IOSAPIC pin to an IA-64 interrupt vector.  This interrupt vector is
63  *     then sent to the CPU.
64  *
65  * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
66  *     used as the architecture-independent interrupt handling mechanism in
67  *     Linux.  Because an IRQ is just a number, we need a mapping between
68  *     IA-64 interrupt vector numbers and IRQ numbers.  On smaller systems,
69  *     we use a one-to-one mapping between IA-64 vectors and IRQs.  A
70  *     platform can implement platform_irq_to_vector(irq) and
71  *     platform_local_vector_to_irq(vector) APIs to provide a different
72  *     mapping.  See also include/asm-ia64/hw_irq.h for those APIs.
73  *
74  * To sum up, there are three levels of mappings involved:
75  *
76  *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
77  *
78  * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to
79  * describe interrupts.  Here we use "IRQ" only for Linux IRQs.  ISA IRQ
80  * (isa_irq) is the only exception in this source code.
81  */
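/*
 * A worked example (the numbers are hypothetical, for illustration only):
 * suppose an IOSAPIC has gsi_base 16 and a PCI device's INTA line is wired
 * to pin 7 of that IOSAPIC.  The line's GSI is then 16 + 7 = 23.  The RTE
 * for pin 7 gets programmed with some IA-64 vector, say 0x51, and on
 * smaller systems the Linux IRQ number equals that vector:
 *
 *	PCI INTA -> GSI 23 -> IA-64 vector 0x51 <-> IRQ 0x51
 *
 * In code terms, gsi_to_irq(23) returns the Linux IRQ for the line and
 * irq_to_vector(irq) returns the vector programmed into the RTE.
 */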
82 
83 #include <linux/acpi.h>
84 #include <linux/init.h>
85 #include <linux/irq.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/pci.h>
89 #include <linux/smp.h>
90 #include <linux/string.h>
91 #include <linux/bootmem.h>
92 
93 #include <asm/delay.h>
94 #include <asm/hw_irq.h>
95 #include <asm/io.h>
96 #include <asm/iosapic.h>
97 #include <asm/machvec.h>
98 #include <asm/processor.h>
99 #include <asm/ptrace.h>
100 #include <asm/system.h>
101 
102 #undef DEBUG_INTERRUPT_ROUTING
103 
104 #ifdef DEBUG_INTERRUPT_ROUTING
105 #define DBG(fmt...)	printk(fmt)
106 #else
107 #define DBG(fmt...)
108 #endif
109 
110 #define NR_PREALLOCATE_RTE_ENTRIES \
111 	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
112 #define RTE_PREALLOCATED	(1)
113 
114 static DEFINE_SPINLOCK(iosapic_lock);
115 
116 /*
117  * These tables map IA-64 vectors to the IOSAPIC pins that generate
118  * them.
119  */
120 
121 #define NO_REF_RTE	0
122 
123 static struct iosapic {
124 	char __iomem	*addr;		/* base address of IOSAPIC */
125 	unsigned int	gsi_base;	/* GSI base */
126 	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
127 	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
128 #ifdef CONFIG_NUMA
129 	unsigned short	node;		/* numa node association via pxm */
130 #endif
131 	spinlock_t	lock;		/* lock for indirect reg access */
132 } iosapic_lists[NR_IOSAPICS];
133 
134 struct iosapic_rte_info {
135 	struct list_head rte_list;	/* RTEs sharing the same vector */
136 	char		rte_index;	/* IOSAPIC RTE index */
137 	int		refcnt;		/* reference counter */
138 	unsigned int	flags;		/* flags */
139 	struct iosapic	*iosapic;
140 } ____cacheline_aligned;
141 
142 static struct iosapic_intr_info {
143 	struct list_head rtes;		/* RTEs using this vector (empty =>
144 					 * not an IOSAPIC interrupt) */
145 	int		count;		/* # of registered RTEs */
146 	u32		low32;		/* current value of low word of
147 					 * Redirection table entry */
148 	unsigned int	dest;		/* destination CPU physical ID */
149 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
150 	unsigned char 	polarity: 1;	/* interrupt polarity
151 					 * (see iosapic.h) */
152 	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
153 } iosapic_intr_info[NR_IRQS];
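
/*
 * How the structures above fit together: iosapic_intr_info[irq].rtes heads
 * a list of iosapic_rte_info nodes, one per IOSAPIC pin routed to that IRQ.
 * Each node records the pin (rte_index) and points back to its struct
 * iosapic, so a pin's GSI is iosapic->gsi_base + rte->rte_index.
 */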
154 
155 static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */
156 
157 static int iosapic_kmalloc_ok;
158 static LIST_HEAD(free_rte_list);
159 
160 static inline void
161 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
162 {
163 	unsigned long flags;
164 
165 	spin_lock_irqsave(&iosapic->lock, flags);
166 	__iosapic_write(iosapic->addr, reg, val);
167 	spin_unlock_irqrestore(&iosapic->lock, flags);
168 }
169 
170 /*
171  * Find an IOSAPIC associated with a GSI
172  */
173 static inline int
174 find_iosapic (unsigned int gsi)
175 {
176 	int i;
177 
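	/*
	 * Note: the unsigned subtraction below doubles as a range check:
	 * if gsi is below gsi_base the difference wraps to a huge value
	 * and fails the comparison against num_rte, so only GSIs in
	 * [gsi_base, gsi_base + num_rte) match this IOSAPIC.
	 */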
178 	for (i = 0; i < NR_IOSAPICS; i++) {
179 		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
180 		    iosapic_lists[i].num_rte)
181 			return i;
182 	}
183 
184 	return -1;
185 }
186 
187 static inline int __gsi_to_irq(unsigned int gsi)
188 {
189 	int irq;
190 	struct iosapic_intr_info *info;
191 	struct iosapic_rte_info *rte;
192 
193 	for (irq = 0; irq < NR_IRQS; irq++) {
194 		info = &iosapic_intr_info[irq];
195 		list_for_each_entry(rte, &info->rtes, rte_list)
196 			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
197 				return irq;
198 	}
199 	return -1;
200 }
201 
202 int
203 gsi_to_irq (unsigned int gsi)
204 {
205 	unsigned long flags;
206 	int irq;
207 
208 	spin_lock_irqsave(&iosapic_lock, flags);
209 	irq = __gsi_to_irq(gsi);
210 	spin_unlock_irqrestore(&iosapic_lock, flags);
211 	return irq;
212 }
213 
214 static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
215 {
216 	struct iosapic_rte_info *rte;
217 
218 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
219 		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
220 			return rte;
221 	return NULL;
222 }
223 
224 static void
225 set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
226 {
227 	unsigned long pol, trigger, dmode;
228 	u32 low32, high32;
229 	int rte_index;
230 	char redir;
231 	struct iosapic_rte_info *rte;
232 	ia64_vector vector = irq_to_vector(irq);
233 
234 	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
235 
236 	rte = find_rte(irq, gsi);
237 	if (!rte)
238 		return;		/* not an IOSAPIC interrupt */
239 
240 	rte_index = rte->rte_index;
241 	pol     = iosapic_intr_info[irq].polarity;
242 	trigger = iosapic_intr_info[irq].trigger;
243 	dmode   = iosapic_intr_info[irq].dmode;
244 
245 	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
246 
247 #ifdef CONFIG_SMP
248 	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
249 #endif
250 
251 	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
252 		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
253 		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
254 		 ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
255 		 vector);
256 
257 	/* dest contains both id and eid */
258 	high32 = (dest << IOSAPIC_DEST_SHIFT);
259 
260 	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
261 	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
262 	iosapic_intr_info[irq].low32 = low32;
263 	iosapic_intr_info[irq].dest = dest;
264 }
265 
266 static void
267 nop (unsigned int irq)
268 {
269 	/* do nothing... */
270 }
271 
272 
273 #ifdef CONFIG_KEXEC
274 void
275 kexec_disable_iosapic(void)
276 {
277 	struct iosapic_intr_info *info;
278 	struct iosapic_rte_info *rte;
279 	ia64_vector vec;
280 	int irq;
281 
282 	for (irq = 0; irq < NR_IRQS; irq++) {
283 		info = &iosapic_intr_info[irq];
284 		vec = irq_to_vector(irq);
285 		list_for_each_entry(rte, &info->rtes,
286 				rte_list) {
287 			iosapic_write(rte->iosapic,
288 					IOSAPIC_RTE_LOW(rte->rte_index),
289 					IOSAPIC_MASK|vec);
290 			iosapic_eoi(rte->iosapic->addr, vec);
291 		}
292 	}
293 }
294 #endif
295 
296 static void
297 mask_irq (unsigned int irq)
298 {
299 	u32 low32;
300 	int rte_index;
301 	struct iosapic_rte_info *rte;
302 
303 	if (!iosapic_intr_info[irq].count)
304 		return;			/* not an IOSAPIC interrupt! */
305 
306 	/* set only the mask bit */
307 	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
308 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
309 		rte_index = rte->rte_index;
310 		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
311 	}
312 }
313 
314 static void
315 unmask_irq (unsigned int irq)
316 {
317 	u32 low32;
318 	int rte_index;
319 	struct iosapic_rte_info *rte;
320 
321 	if (!iosapic_intr_info[irq].count)
322 		return;			/* not an IOSAPIC interrupt! */
323 
324 	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
325 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
326 		rte_index = rte->rte_index;
327 		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
328 	}
329 }
330 
331 
332 static void
333 iosapic_set_affinity (unsigned int irq, cpumask_t mask)
334 {
335 #ifdef CONFIG_SMP
336 	u32 high32, low32;
337 	int dest, rte_index;
338 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
339 	struct iosapic_rte_info *rte;
340 	struct iosapic *iosapic;
341 
342 	irq &= (~IA64_IRQ_REDIRECTED);
343 
344 	cpus_and(mask, mask, cpu_online_map);
345 	if (cpus_empty(mask))
346 		return;
347 
348 	if (irq_prepare_move(irq, first_cpu(mask)))
349 		return;
350 
351 	dest = cpu_physical_id(first_cpu(mask));
352 
353 	if (!iosapic_intr_info[irq].count)
354 		return;			/* not an IOSAPIC interrupt */
355 
356 	set_irq_affinity_info(irq, dest, redir);
357 
358 	/* dest contains both id and eid */
359 	high32 = dest << IOSAPIC_DEST_SHIFT;
360 
361 	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
362 	if (redir)
363 		/* change delivery mode to lowest priority */
364 		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
365 	else
366 		/* change delivery mode to fixed */
367 		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
368 	low32 &= IOSAPIC_VECTOR_MASK;
369 	low32 |= irq_to_vector(irq);
370 
371 	iosapic_intr_info[irq].low32 = low32;
372 	iosapic_intr_info[irq].dest = dest;
373 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
374 		iosapic = rte->iosapic;
375 		rte_index = rte->rte_index;
376 		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
377 		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
378 	}
379 #endif
380 }
381 
382 /*
383  * Handlers for level-triggered interrupts.
384  */
385 
386 static unsigned int
387 iosapic_startup_level_irq (unsigned int irq)
388 {
389 	unmask_irq(irq);
390 	return 0;
391 }
392 
393 static void
394 iosapic_end_level_irq (unsigned int irq)
395 {
396 	ia64_vector vec = irq_to_vector(irq);
397 	struct iosapic_rte_info *rte;
398 	int do_unmask_irq = 0;
399 
400 	irq_complete_move(irq);
401 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
402 		do_unmask_irq = 1;
403 		mask_irq(irq);
404 	}
405 
406 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
407 		iosapic_eoi(rte->iosapic->addr, vec);
408 
409 	if (unlikely(do_unmask_irq)) {
410 		move_masked_irq(irq);
411 		unmask_irq(irq);
412 	}
413 }
414 
415 #define iosapic_shutdown_level_irq	mask_irq
416 #define iosapic_enable_level_irq	unmask_irq
417 #define iosapic_disable_level_irq	mask_irq
418 #define iosapic_ack_level_irq		nop
419 
420 static struct irq_chip irq_type_iosapic_level = {
421 	.name =		"IO-SAPIC-level",
422 	.startup =	iosapic_startup_level_irq,
423 	.shutdown =	iosapic_shutdown_level_irq,
424 	.enable =	iosapic_enable_level_irq,
425 	.disable =	iosapic_disable_level_irq,
426 	.ack =		iosapic_ack_level_irq,
427 	.end =		iosapic_end_level_irq,
428 	.mask =		mask_irq,
429 	.unmask =	unmask_irq,
430 	.set_affinity =	iosapic_set_affinity
431 };
432 
433 /*
434  * Handlers for edge-triggered interrupts.
435  */
436 
437 static unsigned int
438 iosapic_startup_edge_irq (unsigned int irq)
439 {
440 	unmask_irq(irq);
441 	/*
442 	 * The IOSAPIC simply drops interrupts that arrive while the
443 	 * corresponding pin is masked, so we can't know whether an
444 	 * interrupt is already pending.  Let's hope not...
445 	 */
446 	return 0;
447 }
448 
449 static void
450 iosapic_ack_edge_irq (unsigned int irq)
451 {
452 	irq_desc_t *idesc = irq_desc + irq;
453 
454 	irq_complete_move(irq);
455 	move_native_irq(irq);
456 	/*
457 	 * Once IRQ_PENDING has been recorded, we can mask the
458 	 * interrupt for real.  This prevents IRQ storms from unhandled
459 	 * devices.
460 	 */
461 	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
462 	    (IRQ_PENDING|IRQ_DISABLED))
463 		mask_irq(irq);
464 }
465 
466 #define iosapic_enable_edge_irq		unmask_irq
467 #define iosapic_disable_edge_irq	nop
468 #define iosapic_end_edge_irq		nop
469 
470 static struct irq_chip irq_type_iosapic_edge = {
471 	.name =		"IO-SAPIC-edge",
472 	.startup =	iosapic_startup_edge_irq,
473 	.shutdown =	iosapic_disable_edge_irq,
474 	.enable =	iosapic_enable_edge_irq,
475 	.disable =	iosapic_disable_edge_irq,
476 	.ack =		iosapic_ack_edge_irq,
477 	.end =		iosapic_end_edge_irq,
478 	.mask =		mask_irq,
479 	.unmask =	unmask_irq,
480 	.set_affinity =	iosapic_set_affinity
481 };
482 
483 static unsigned int
484 iosapic_version (char __iomem *addr)
485 {
486 	/*
487 	 * The IOSAPIC Version Register returns a 32-bit structure like:
488 	 * {
489 	 *	unsigned int version   : 8;
490 	 *	unsigned int reserved1 : 8;
491 	 *	unsigned int max_redir : 8;
492 	 *	unsigned int reserved2 : 8;
493 	 * }
494 	 */
495 	return __iosapic_read(addr, IOSAPIC_VERSION);
496 }
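
/*
 * Illustrative decode of the version word returned above (a sketch only,
 * not code used by the driver; the field layout is as in the comment in
 * iosapic_version()):
 *
 *	u32 ver       = iosapic_version(addr);
 *	int version   = ver & 0xff;
 *	int max_redir = (ver >> 16) & 0xff;
 *	int num_rte   = max_redir + 1;
 *
 * iosapic_init() below derives the number of RTEs from MAX_REDIR in
 * exactly this way.
 */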
497 
498 static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
499 {
500 	int i, irq = -ENOSPC, min_count = -1;
501 	struct iosapic_intr_info *info;
502 
503 	/*
504 	 * shared vectors for edge-triggered interrupts are not
505 	 * supported yet
506 	 */
507 	if (trigger == IOSAPIC_EDGE)
508 		return -EINVAL;
509 
510 	for (i = 0; i < NR_IRQS; i++) {
511 		info = &iosapic_intr_info[i];
512 		if (info->trigger == trigger && info->polarity == pol &&
513 		    (info->dmode == IOSAPIC_FIXED ||
514 		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
515 		    can_request_irq(i, IRQF_SHARED)) {
516 			if (min_count == -1 || info->count < min_count) {
517 				irq = i;
518 				min_count = info->count;
519 			}
520 		}
521 	}
522 	return irq;
523 }
524 
525 /*
526  * If the given vector is already owned by another interrupt, assign a
527  * new vector to that interrupt and make the original vector available.
528  */
529 static void __init
530 iosapic_reassign_vector (int irq)
531 {
532 	int new_irq;
533 
534 	if (iosapic_intr_info[irq].count) {
535 		new_irq = create_irq();
536 		if (new_irq < 0)
537 			panic("%s: out of interrupt vectors!\n", __func__);
538 		printk(KERN_INFO "Reassigning vector %d to %d\n",
539 		       irq_to_vector(irq), irq_to_vector(new_irq));
540 		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
541 		       sizeof(struct iosapic_intr_info));
542 		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
543 		list_move(iosapic_intr_info[irq].rtes.next,
544 			  &iosapic_intr_info[new_irq].rtes);
545 		memset(&iosapic_intr_info[irq], 0,
546 		       sizeof(struct iosapic_intr_info));
547 		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
548 		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
549 	}
550 }
551 
552 static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
553 {
554 	int i;
555 	struct iosapic_rte_info *rte;
556 	int preallocated = 0;
557 
558 	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
559 		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
560 				    NR_PREALLOCATE_RTE_ENTRIES);
561 		if (!rte)
562 			return NULL;
563 		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
564 			list_add(&rte->rte_list, &free_rte_list);
565 	}
566 
567 	if (!list_empty(&free_rte_list)) {
568 		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
569 				 rte_list);
570 		list_del(&rte->rte_list);
571 		preallocated++;
572 	} else {
573 		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
574 		if (!rte)
575 			return NULL;
576 	}
577 
578 	memset(rte, 0, sizeof(struct iosapic_rte_info));
579 	if (preallocated)
580 		rte->flags |= RTE_PREALLOCATED;
581 
582 	return rte;
583 }
584 
585 static inline int irq_is_shared (int irq)
586 {
587 	return (iosapic_intr_info[irq].count > 1);
588 }
589 
590 static int
591 register_intr (unsigned int gsi, int irq, unsigned char delivery,
592 	       unsigned long polarity, unsigned long trigger)
593 {
594 	irq_desc_t *idesc;
595 	struct hw_interrupt_type *irq_type;
596 	int index;
597 	struct iosapic_rte_info *rte;
598 
599 	index = find_iosapic(gsi);
600 	if (index < 0) {
601 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
602 		       __func__, gsi);
603 		return -ENODEV;
604 	}
605 
606 	rte = find_rte(irq, gsi);
607 	if (!rte) {
608 		rte = iosapic_alloc_rte();
609 		if (!rte) {
610 			printk(KERN_WARNING "%s: cannot allocate memory\n",
611 			       __func__);
612 			return -ENOMEM;
613 		}
614 
615 		rte->iosapic	= &iosapic_lists[index];
616 		rte->rte_index	= gsi - rte->iosapic->gsi_base;
617 		rte->refcnt++;
618 		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
619 		iosapic_intr_info[irq].count++;
620 		iosapic_lists[index].rtes_inuse++;
621 	}
622 	else if (rte->refcnt == NO_REF_RTE) {
623 		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
624 		if (info->count > 0 &&
625 		    (info->trigger != trigger || info->polarity != polarity)){
626 			printk (KERN_WARNING
627 				"%s: cannot override the interrupt\n",
628 				__func__);
629 			return -EINVAL;
630 		}
631 		rte->refcnt++;
632 		iosapic_intr_info[irq].count++;
633 		iosapic_lists[index].rtes_inuse++;
634 	}
635 
636 	iosapic_intr_info[irq].polarity = polarity;
637 	iosapic_intr_info[irq].dmode    = delivery;
638 	iosapic_intr_info[irq].trigger  = trigger;
639 
640 	if (trigger == IOSAPIC_EDGE)
641 		irq_type = &irq_type_iosapic_edge;
642 	else
643 		irq_type = &irq_type_iosapic_level;
644 
645 	idesc = irq_desc + irq;
646 	if (idesc->chip != irq_type) {
647 		if (idesc->chip != &no_irq_type)
648 			printk(KERN_WARNING
649 			       "%s: changing vector %d from %s to %s\n",
650 			       __func__, irq_to_vector(irq),
651 			       idesc->chip->name, irq_type->name);
652 		idesc->chip = irq_type;
653 	}
654 	return 0;
655 }
656 
657 static unsigned int
658 get_target_cpu (unsigned int gsi, int irq)
659 {
660 #ifdef CONFIG_SMP
661 	static int cpu = -1;
662 	extern int cpe_vector;
663 	cpumask_t domain = irq_to_domain(irq);
664 
665 	/*
666 	 * If the vector is shared by multiple RTEs, all RTEs that
667 	 * share it must use the same destination CPU.
668 	 */
669 	if (iosapic_intr_info[irq].count)
670 		return iosapic_intr_info[irq].dest;
671 
672 	/*
673 	 * If the platform supports redirection via XTP, let it
674 	 * distribute interrupts.
675 	 */
676 	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
677 		return cpu_physical_id(smp_processor_id());
678 
679 	/*
680 	 * Some interrupts (ACPI SCI, for instance) are registered
681 	 * before the BSP is marked as online.
682 	 */
683 	if (!cpu_online(smp_processor_id()))
684 		return cpu_physical_id(smp_processor_id());
685 
686 #ifdef CONFIG_ACPI
687 	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
688 		return get_cpei_target_cpu();
689 #endif
690 
691 #ifdef CONFIG_NUMA
692 	{
693 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
694 		cpumask_t cpu_mask;
695 
696 		iosapic_index = find_iosapic(gsi);
697 		if (iosapic_index < 0 ||
698 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
699 			goto skip_numa_setup;
700 
701 		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
702 		cpus_and(cpu_mask, cpu_mask, domain);
703 		for_each_cpu_mask(numa_cpu, cpu_mask) {
704 			if (!cpu_online(numa_cpu))
705 				cpu_clear(numa_cpu, cpu_mask);
706 		}
707 
708 		num_cpus = cpus_weight(cpu_mask);
709 
710 		if (!num_cpus)
711 			goto skip_numa_setup;
712 
713 		/* Use irq assignment to distribute across cpus in node */
714 		cpu_index = irq % num_cpus;
715 
716 		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
717 			numa_cpu = next_cpu(numa_cpu, cpu_mask);
718 
719 		if (numa_cpu != NR_CPUS)
720 			return cpu_physical_id(numa_cpu);
721 	}
722 skip_numa_setup:
723 #endif
724 	/*
725 	 * Otherwise, round-robin interrupt vectors across all the
726 	 * processors.  (It'd be nice if we could be smarter in the
727 	 * case of NUMA.)
728 	 */
729 	do {
730 		if (++cpu >= NR_CPUS)
731 			cpu = 0;
732 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
733 
734 	return cpu_physical_id(cpu);
735 #else  /* CONFIG_SMP */
736 	return cpu_physical_id(smp_processor_id());
737 #endif
738 }
739 
740 static inline unsigned char choose_dmode(void)
741 {
742 #ifdef CONFIG_SMP
743 	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
744 		return IOSAPIC_LOWEST_PRIORITY;
745 #endif
746 	return IOSAPIC_FIXED;
747 }
748 
749 /*
750  * ACPI can describe IOSAPIC interrupts via static tables and namespace
751  * methods.  This provides an interface to register those interrupts and
752  * program the IOSAPIC RTE.
753  */
754 int
755 iosapic_register_intr (unsigned int gsi,
756 		       unsigned long polarity, unsigned long trigger)
757 {
758 	int irq, mask = 1, err;
759 	unsigned int dest;
760 	unsigned long flags;
761 	struct iosapic_rte_info *rte;
762 	u32 low32;
763 	unsigned char dmode;
764 
765 	/*
766 	 * If this GSI has already been registered (i.e., it's a
767 	 * shared interrupt, or we lost a race to register it),
768 	 * don't touch the RTE.
769 	 */
770 	spin_lock_irqsave(&iosapic_lock, flags);
771 	irq = __gsi_to_irq(gsi);
772 	if (irq > 0) {
773 		rte = find_rte(irq, gsi);
774 		if (iosapic_intr_info[irq].count == 0) {
775 			assign_irq_vector(irq);
776 			dynamic_irq_init(irq);
777 		} else if (rte->refcnt != NO_REF_RTE) {
778 			rte->refcnt++;
779 			goto unlock_iosapic_lock;
780 		}
781 	} else
782 		irq = create_irq();
783 
784 	/* If we are running out of vectors, try to find a sharable vector */
785 	if (irq < 0) {
786 		irq = iosapic_find_sharable_irq(trigger, polarity);
787 		if (irq < 0)
788 			goto unlock_iosapic_lock;
789 	}
790 
791 	spin_lock(&irq_desc[irq].lock);
792 	dest = get_target_cpu(gsi, irq);
793 	dmode = choose_dmode();
794 	err = register_intr(gsi, irq, dmode, polarity, trigger);
795 	if (err < 0) {
796 		spin_unlock(&irq_desc[irq].lock);
797 		irq = err;
798 		goto unlock_iosapic_lock;
799 	}
800 
801 	/*
802 	 * If the vector is shared and already unmasked for other
803 	 * interrupt sources, don't mask it.
804 	 */
805 	low32 = iosapic_intr_info[irq].low32;
806 	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
807 		mask = 0;
808 	set_rte(gsi, irq, dest, mask);
809 
810 	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
811 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
812 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
813 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
814 
815 	spin_unlock(&irq_desc[irq].lock);
816  unlock_iosapic_lock:
817 	spin_unlock_irqrestore(&iosapic_lock, flags);
818 	return irq;
819 }
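
/*
 * Hypothetical usage sketch (my_handler, my_dev and "mydev" are
 * illustrative names, not part of this file): a driver whose interrupt
 * ACPI describes as level-triggered, active-low on GSI gsi might do:
 *
 *	irq = iosapic_register_intr(gsi, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
 *	if (irq >= 0)
 *		err = request_irq(irq, my_handler, IRQF_SHARED,
 *				  "mydev", my_dev);
 *
 * On ia64 this is normally reached via acpi_register_gsi() rather than
 * called directly by drivers.
 */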
820 
821 void
822 iosapic_unregister_intr (unsigned int gsi)
823 {
824 	unsigned long flags;
825 	int irq, index;
826 	irq_desc_t *idesc;
827 	u32 low32;
828 	unsigned long trigger, polarity;
829 	unsigned int dest;
830 	struct iosapic_rte_info *rte;
831 
832 	/*
833 	 * If the irq associated with the gsi is not found,
834 	 * iosapic_unregister_intr() is unbalanced.  We need to check
835 	 * this again after taking the lock.
836 	 */
837 	irq = gsi_to_irq(gsi);
838 	if (irq < 0) {
839 		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
840 		       gsi);
841 		WARN_ON(1);
842 		return;
843 	}
844 
845 	spin_lock_irqsave(&iosapic_lock, flags);
846 	if ((rte = find_rte(irq, gsi)) == NULL) {
847 		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
848 		       gsi);
849 		WARN_ON(1);
850 		goto out;
851 	}
852 
853 	if (--rte->refcnt > 0)
854 		goto out;
855 
856 	idesc = irq_desc + irq;
857 	rte->refcnt = NO_REF_RTE;
858 
859 	/* Mask the interrupt */
860 	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
861 	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
862 
863 	iosapic_intr_info[irq].count--;
864 	index = find_iosapic(gsi);
865 	iosapic_lists[index].rtes_inuse--;
866 	WARN_ON(iosapic_lists[index].rtes_inuse < 0);
867 
868 	trigger  = iosapic_intr_info[irq].trigger;
869 	polarity = iosapic_intr_info[irq].polarity;
870 	dest     = iosapic_intr_info[irq].dest;
871 	printk(KERN_INFO
872 	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
873 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
874 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
875 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
876 
877 	if (iosapic_intr_info[irq].count == 0) {
878 #ifdef CONFIG_SMP
879 		/* Clear affinity */
880 		cpus_setall(idesc->affinity);
881 #endif
882 		/* Clear the interrupt information */
883 		iosapic_intr_info[irq].dest = 0;
884 		iosapic_intr_info[irq].dmode = 0;
885 		iosapic_intr_info[irq].polarity = 0;
886 		iosapic_intr_info[irq].trigger = 0;
887 		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
888 
889 		/* Destroy and reserve IRQ */
890 		destroy_and_reserve_irq(irq);
891 	}
892  out:
893 	spin_unlock_irqrestore(&iosapic_lock, flags);
894 }
895 
896 /*
897  * ACPI calls this when it finds an entry for a platform interrupt.
898  */
899 int __init
900 iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
901 				int iosapic_vector, u16 eid, u16 id,
902 				unsigned long polarity, unsigned long trigger)
903 {
904 	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
905 	unsigned char delivery;
906 	int irq, vector, mask = 0;
907 	unsigned int dest = ((id << 8) | eid) & 0xffff;
908 
909 	switch (int_type) {
910 	      case ACPI_INTERRUPT_PMI:
911 		irq = vector = iosapic_vector;
912 		bind_irq_vector(irq, vector, CPU_MASK_ALL);
913 		/*
914 		 * Since the PMI vector is allocated by firmware (ACPI), not
915 		 * by the kernel, we need to make sure the vector is available.
916 		 */
917 		iosapic_reassign_vector(irq);
918 		delivery = IOSAPIC_PMI;
919 		break;
920 	      case ACPI_INTERRUPT_INIT:
921 		irq = create_irq();
922 		if (irq < 0)
923 			panic("%s: out of interrupt vectors!\n", __func__);
924 		vector = irq_to_vector(irq);
925 		delivery = IOSAPIC_INIT;
926 		break;
927 	      case ACPI_INTERRUPT_CPEI:
928 		irq = vector = IA64_CPE_VECTOR;
929 		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
930 		delivery = IOSAPIC_FIXED;
931 		mask = 1;
932 		break;
933 	      default:
934 		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
935 		       int_type);
936 		return -1;
937 	}
938 
939 	register_intr(gsi, irq, delivery, polarity, trigger);
940 
941 	printk(KERN_INFO
942 	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
943 	       " vector %d\n",
944 	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
945 	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
946 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
947 	       cpu_logical_id(dest), dest, vector);
948 
949 	set_rte(gsi, irq, dest, mask);
950 	return vector;
951 }
952 
953 /*
954  * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
955  */
956 void __devinit
957 iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
958 			  unsigned long polarity,
959 			  unsigned long trigger)
960 {
961 	int vector, irq;
962 	unsigned int dest = cpu_physical_id(smp_processor_id());
963 	unsigned char dmode;
964 
965 	irq = vector = isa_irq_to_vector(isa_irq);
966 	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
967 	dmode = choose_dmode();
968 	register_intr(gsi, irq, dmode, polarity, trigger);
969 
970 	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
971 	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
972 	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
973 	    cpu_logical_id(dest), dest, vector);
974 
975 	set_rte(gsi, irq, dest, 1);
976 }
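
/*
 * Example (hypothetical values): an ACPI Interrupt Source Override that
 * routes ISA IRQ 9 to GSI 9 as a level-triggered, active-low interrupt
 * would be applied roughly as:
 *
 *	iosapic_override_isa_irq(9, 9, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
 *
 * ISA IRQs without an override keep the edge/high defaults programmed by
 * iosapic_init() below.
 */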
977 
978 void __init
979 iosapic_system_init (int system_pcat_compat)
980 {
981 	int irq;
982 
983 	for (irq = 0; irq < NR_IRQS; ++irq) {
984 		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
985 		/* mark as unused */
986 		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
987 
988 		iosapic_intr_info[irq].count = 0;
989 	}
990 
991 	pcat_compat = system_pcat_compat;
992 	if (pcat_compat) {
993 		/*
994 		 * Disable the compatibility-mode interrupts (8259 style);
995 		 * this needs IN/OUT support enabled.
996 		 */
997 		printk(KERN_INFO
998 		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
999 		       __func__);
1000 		outb(0xff, 0xA1);
1001 		outb(0xff, 0x21);
1002 	}
1003 }
1004 
1005 static inline int
1006 iosapic_alloc (void)
1007 {
1008 	int index;
1009 
1010 	for (index = 0; index < NR_IOSAPICS; index++)
1011 		if (!iosapic_lists[index].addr)
1012 			return index;
1013 
1014 	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
1015 	return -1;
1016 }
1017 
1018 static inline void
1019 iosapic_free (int index)
1020 {
1021 	memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
1022 }
1023 
1024 static inline int
1025 iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
1026 {
1027 	int index;
1028 	unsigned int gsi_end, base, end;
1029 
1030 	/* check gsi range */
1031 	gsi_end = gsi_base + ((ver >> 16) & 0xff);
1032 	for (index = 0; index < NR_IOSAPICS; index++) {
1033 		if (!iosapic_lists[index].addr)
1034 			continue;
1035 
1036 		base = iosapic_lists[index].gsi_base;
1037 		end  = base + iosapic_lists[index].num_rte - 1;
1038 
1039 		if (gsi_end < base || end < gsi_base)
1040 			continue; /* OK */
1041 
1042 		return -EBUSY;
1043 	}
1044 	return 0;
1045 }
1046 
1047 int __devinit
1048 iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
1049 {
1050 	int num_rte, err, index;
1051 	unsigned int isa_irq, ver;
1052 	char __iomem *addr;
1053 	unsigned long flags;
1054 
1055 	spin_lock_irqsave(&iosapic_lock, flags);
1056 	index = find_iosapic(gsi_base);
1057 	if (index >= 0) {
1058 		spin_unlock_irqrestore(&iosapic_lock, flags);
1059 		return -EBUSY;
1060 	}
1061 
1062 	addr = ioremap(phys_addr, 0);
1063 	ver = iosapic_version(addr);
1064 	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
1065 		iounmap(addr);
1066 		spin_unlock_irqrestore(&iosapic_lock, flags);
1067 		return err;
1068 	}
1069 
1070 	/*
1071 	 * The MAX_REDIR register holds the highest input pin number
1072 	 * (starting from 0).  We add 1 so that we can use it as the
1073 	 * number of pins (= RTEs).
1074 	 */
1075 	num_rte = ((ver >> 16) & 0xff) + 1;
1076 
1077 	index = iosapic_alloc();
1078 	iosapic_lists[index].addr = addr;
1079 	iosapic_lists[index].gsi_base = gsi_base;
1080 	iosapic_lists[index].num_rte = num_rte;
1081 #ifdef CONFIG_NUMA
1082 	iosapic_lists[index].node = MAX_NUMNODES;
1083 #endif
1084 	spin_lock_init(&iosapic_lists[index].lock);
1085 	spin_unlock_irqrestore(&iosapic_lock, flags);
1086 
1087 	if ((gsi_base == 0) && pcat_compat) {
1088 		/*
1089 		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
1090 		 * these may get reprogrammed later on with data from the ACPI
1091 		 * Interrupt Source Override table.
1092 		 */
1093 		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
1094 			iosapic_override_isa_irq(isa_irq, isa_irq,
1095 						 IOSAPIC_POL_HIGH,
1096 						 IOSAPIC_EDGE);
1097 	}
1098 	return 0;
1099 }
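
/*
 * Sketch of a caller (the ACPI MADT parser registers each I/O SAPIC it
 * finds by passing the entry's physical address and GSI base; the field
 * names below follow the MADT I/O SAPIC entry and are shown only for
 * illustration):
 *
 *	err = iosapic_init(iosapic->address, iosapic->global_irq_base);
 */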
1100 
1101 #ifdef CONFIG_HOTPLUG
1102 int
1103 iosapic_remove (unsigned int gsi_base)
1104 {
1105 	int index, err = 0;
1106 	unsigned long flags;
1107 
1108 	spin_lock_irqsave(&iosapic_lock, flags);
1109 	index = find_iosapic(gsi_base);
1110 	if (index < 0) {
1111 		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
1112 		       __func__, gsi_base);
1113 		goto out;
1114 	}
1115 
1116 	if (iosapic_lists[index].rtes_inuse) {
1117 		err = -EBUSY;
1118 		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
1119 		       __func__, gsi_base);
1120 		goto out;
1121 	}
1122 
1123 	iounmap(iosapic_lists[index].addr);
1124 	iosapic_free(index);
1125  out:
1126 	spin_unlock_irqrestore(&iosapic_lock, flags);
1127 	return err;
1128 }
1129 #endif /* CONFIG_HOTPLUG */
1130 
1131 #ifdef CONFIG_NUMA
1132 void __devinit
1133 map_iosapic_to_node(unsigned int gsi_base, int node)
1134 {
1135 	int index;
1136 
1137 	index = find_iosapic(gsi_base);
1138 	if (index < 0) {
1139 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
1140 		       __func__, gsi_base);
1141 		return;
1142 	}
1143 	iosapic_lists[index].node = node;
1144 	return;
1145 }
1146 #endif
1147 
1148 static int __init iosapic_enable_kmalloc (void)
1149 {
1150 	iosapic_kmalloc_ok = 1;
1151 	return 0;
1152 }
1153 core_initcall (iosapic_enable_kmalloc);
1154