xref: /openbmc/linux/arch/ia64/kernel/iosapic.c (revision c21b37f6)
1 /*
2  * I/O SAPIC support.
3  *
4  * Copyright (C) 1999 Intel Corp.
5  * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
6  * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
7  * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
8  *	David Mosberger-Tang <davidm@hpl.hp.com>
9  * Copyright (C) 1999 VA Linux Systems
10  * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
11  *
12  * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
13  *				APIC code.  In particular, we now have separate
14  *				handlers for edge and level triggered
15  *				interrupts.
16  * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
17  *				allocation PCI to vector mapping, shared PCI
18  *				interrupts.
19  * 00/10/27	D. Mosberger	Document things a bit more to make them more
20  *				understandable.  Clean up much of the old
21  *				IOSAPIC cruft.
22  * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
23  *				and fixes for ACPI S5(SoftOff) support.
24  * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
25  * 02/01/07     E. Focht        <efocht@ess.nec.de> Redirectable interrupt
26  *				vectors in iosapic_set_affinity(),
27  *				initializations for /proc/irq/#/smp_affinity
28  * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
29  * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
30  * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
31  *				IOSAPIC mapping error
32  * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
33  * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
34  *				interrupt, vector, etc.)
35  * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
36  *				pci_irq code.
37  * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
38  *				Remove iosapic_address & gsi_base from
39  *				external interfaces.  Rationalize
40  *				__init/__devinit attributes.
41  * 04/12/04 Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
42  *				Updated to work with irq migration necessary
43  *				for CPU Hotplug
44  */
45 /*
46  * Here is what the interrupt logic between a PCI device and the kernel looks
47  * like:
48  *
49  * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
50  *     INTD).  The device is uniquely identified by its bus and slot number
51  *     (the function number does not matter here because all functions share
52  *     the same interrupt lines).
53  *
54  * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
55  *     controller.  Multiple interrupt lines may have to share the same
56  *     IOSAPIC pin (if they're level triggered and use the same polarity).
57  *     Each interrupt line has a unique Global System Interrupt (GSI) number
58  *     which can be calculated as the sum of the controller's base GSI number
59  *     and the IOSAPIC pin number to which the line connects.
60  *
61  * (3) The IOSAPIC maintains an internal routing table; its entries (RTEs)
62  *     map each IOSAPIC pin to an IA-64 interrupt vector.  This interrupt
63  *     vector is then sent to the CPU.
64  *
65  * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
66  *     used as the architecture-independent interrupt handling mechanism in
67  *     Linux.  Since an IRQ is just a number, we need a mapping between
68  *     IA-64 interrupt vector numbers and IRQ numbers.  On smaller systems,
69  *     we use a one-to-one mapping between IA-64 vectors and IRQs.  A
70  *     platform can implement platform_irq_to_vector(irq) and
71  *     platform_local_vector_to_irq(vector) to provide a different mapping.
72  *     Please see also include/asm-ia64/hw_irq.h for those APIs.
73  *
74  * To sum up, there are three levels of mappings involved:
75  *
76  *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
77  *
78  * Note: The term "IRQ" is used loosely throughout the Linux kernel to
79  * describe interrupts.  Here we use "IRQ" only for Linux IRQs.  ISA IRQ
80  * (isa_irq) is the only exception in this source code.
81  */
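
/*
 * A minimal sketch of the mapping chain above (not a call site in this
 * file; the GSI value is just an example).  gsi_to_irq() is defined below,
 * irq_to_vector() comes from asm/hw_irq.h:
 *
 *	unsigned int gsi = 20;		// e.g. obtained from an ACPI _PRT entry
 *	int irq = gsi_to_irq(gsi);	// GSI -> Linux IRQ, -1 if unknown
 *	if (irq >= 0)
 *		printk(KERN_DEBUG "GSI %u -> irq %d -> vector %d\n",
 *		       gsi, irq, irq_to_vector(irq));
 */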
82 
83 #include <linux/acpi.h>
84 #include <linux/init.h>
85 #include <linux/irq.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/pci.h>
89 #include <linux/smp.h>
90 #include <linux/string.h>
91 #include <linux/bootmem.h>
92 
93 #include <asm/delay.h>
94 #include <asm/hw_irq.h>
95 #include <asm/io.h>
96 #include <asm/iosapic.h>
97 #include <asm/machvec.h>
98 #include <asm/processor.h>
99 #include <asm/ptrace.h>
100 #include <asm/system.h>
101 
102 #undef DEBUG_INTERRUPT_ROUTING
103 
104 #ifdef DEBUG_INTERRUPT_ROUTING
105 #define DBG(fmt...)	printk(fmt)
106 #else
107 #define DBG(fmt...)
108 #endif
109 
110 #define NR_PREALLOCATE_RTE_ENTRIES \
111 	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
112 #define RTE_PREALLOCATED	(1)
113 
114 static DEFINE_SPINLOCK(iosapic_lock);
115 
116 /*
117  * These tables map IA-64 vectors to the IOSAPIC pin that generates this
118  * vector.
119  */
120 
121 #define NO_REF_RTE	0
122 
123 static struct iosapic {
124 	char __iomem	*addr;		/* base address of IOSAPIC */
125 	unsigned int	gsi_base;	/* GSI base */
126 	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
127 	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
128 #ifdef CONFIG_NUMA
129 	unsigned short	node;		/* numa node association via pxm */
130 #endif
131 	spinlock_t	lock;		/* lock for indirect reg access */
132 } iosapic_lists[NR_IOSAPICS];
133 
134 struct iosapic_rte_info {
135 	struct list_head rte_list;	/* RTEs sharing the same vector */
136 	char		rte_index;	/* IOSAPIC RTE index */
137 	int		refcnt;		/* reference counter */
138 	unsigned int	flags;		/* flags */
139 	struct iosapic	*iosapic;
140 } ____cacheline_aligned;
141 
142 static struct iosapic_intr_info {
143 	struct list_head rtes;		/* RTEs using this vector (empty =>
144 					 * not an IOSAPIC interrupt) */
145 	int		count;		/* # of RTEs that share this vector */
146 	u32		low32;		/* current value of low word of
147 					 * Redirection table entry */
148 	unsigned int	dest;		/* destination CPU physical ID */
149 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
150 	unsigned char 	polarity: 1;	/* interrupt polarity
151 					 * (see iosapic.h) */
152 	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
153 } iosapic_intr_info[NR_IRQS];
154 
155 static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */
156 
157 static int iosapic_kmalloc_ok;
158 static LIST_HEAD(free_rte_list);
159 
160 static inline void
161 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
162 {
163 	unsigned long flags;
164 
165 	spin_lock_irqsave(&iosapic->lock, flags);
166 	__iosapic_write(iosapic->addr, reg, val);
167 	spin_unlock_irqrestore(&iosapic->lock, flags);
168 }
169 
170 /*
171  * Find an IOSAPIC associated with a GSI
172  */
173 static inline int
174 find_iosapic (unsigned int gsi)
175 {
176 	int i;
177 
178 	for (i = 0; i < NR_IOSAPICS; i++) {
179 		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
180 		    iosapic_lists[i].num_rte)
181 			return i;
182 	}
183 
184 	return -1;
185 }
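
/*
 * Note on the range check above: the unsigned subtraction gives a
 * branch-free "is gsi within [gsi_base, gsi_base + num_rte)" test.  For
 * example, with gsi_base = 16 and num_rte = 24, any gsi in [16, 39] makes
 * (unsigned) (gsi - 16) < 24 true, while gsi < 16 wraps around to a huge
 * unsigned value and fails the comparison.
 */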
186 
187 static inline int __gsi_to_irq(unsigned int gsi)
188 {
189 	int irq;
190 	struct iosapic_intr_info *info;
191 	struct iosapic_rte_info *rte;
192 
193 	for (irq = 0; irq < NR_IRQS; irq++) {
194 		info = &iosapic_intr_info[irq];
195 		list_for_each_entry(rte, &info->rtes, rte_list)
196 			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
197 				return irq;
198 	}
199 	return -1;
200 }
201 
202 /*
203  * Translate GSI number to the corresponding IA-64 interrupt vector.  If no
204  * entry exists, return -1.
205  */
206 inline int
207 gsi_to_vector (unsigned int gsi)
208 {
209 	int irq = __gsi_to_irq(gsi);
210 	if (check_irq_used(irq) < 0)
211 		return -1;
212 	return irq_to_vector(irq);
213 }
214 
215 int
216 gsi_to_irq (unsigned int gsi)
217 {
218 	unsigned long flags;
219 	int irq;
220 
221 	spin_lock_irqsave(&iosapic_lock, flags);
222 	irq = __gsi_to_irq(gsi);
223 	spin_unlock_irqrestore(&iosapic_lock, flags);
224 	return irq;
225 }
226 
227 static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
228 {
229 	struct iosapic_rte_info *rte;
230 
231 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
232 		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
233 			return rte;
234 	return NULL;
235 }
236 
237 static void
238 set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
239 {
240 	unsigned long pol, trigger, dmode;
241 	u32 low32, high32;
242 	int rte_index;
243 	char redir;
244 	struct iosapic_rte_info *rte;
245 	ia64_vector vector = irq_to_vector(irq);
246 
247 	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
248 
249 	rte = find_rte(irq, gsi);
250 	if (!rte)
251 		return;		/* not an IOSAPIC interrupt */
252 
253 	rte_index = rte->rte_index;
254 	pol     = iosapic_intr_info[irq].polarity;
255 	trigger = iosapic_intr_info[irq].trigger;
256 	dmode   = iosapic_intr_info[irq].dmode;
257 
258 	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
259 
260 #ifdef CONFIG_SMP
261 	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
262 #endif
263 
264 	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
265 		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
266 		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
267 		 ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
268 		 vector);
269 
270 	/* dest contains both id and eid */
271 	high32 = (dest << IOSAPIC_DEST_SHIFT);
272 
273 	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
274 	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
275 	iosapic_intr_info[irq].low32 = low32;
276 	iosapic_intr_info[irq].dest = dest;
277 }
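
/*
 * For reference, a sketch of the 64-bit RTE that set_rte() programs via two
 * 32-bit writes (shift names are from asm/iosapic.h; "id"/"eid" are the
 * destination local SAPIC id and eid, packed as in
 * iosapic_register_platform_intr() below):
 *
 *	low32  = (polarity << IOSAPIC_POLARITY_SHIFT) |
 *		 (trigger  << IOSAPIC_TRIGGER_SHIFT)  |
 *		 (dmode    << IOSAPIC_DELIVERY_SHIFT) |
 *		 (mask_bit << IOSAPIC_MASK_SHIFT)     |
 *		 vector;
 *	high32 = ((id << 8) | eid) << IOSAPIC_DEST_SHIFT;
 */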
278 
279 static void
280 nop (unsigned int irq)
281 {
282 	/* do nothing... */
283 }
284 
285 
286 #ifdef CONFIG_KEXEC
287 void
288 kexec_disable_iosapic(void)
289 {
290 	struct iosapic_intr_info *info;
291 	struct iosapic_rte_info *rte;
292 	ia64_vector vec;
293 	int irq;
294 
295 	for (irq = 0; irq < NR_IRQS; irq++) {
296 		info = &iosapic_intr_info[irq];
297 		vec = irq_to_vector(irq);
298 		list_for_each_entry(rte, &info->rtes,
299 				rte_list) {
300 			iosapic_write(rte->iosapic,
301 					IOSAPIC_RTE_LOW(rte->rte_index),
302 					IOSAPIC_MASK|vec);
303 			iosapic_eoi(rte->iosapic->addr, vec);
304 		}
305 	}
306 }
307 #endif
308 
309 static void
310 mask_irq (unsigned int irq)
311 {
312 	u32 low32;
313 	int rte_index;
314 	struct iosapic_rte_info *rte;
315 
316 	if (list_empty(&iosapic_intr_info[irq].rtes))
317 		return;			/* not an IOSAPIC interrupt! */
318 
319 	/* set only the mask bit */
320 	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
321 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
322 		rte_index = rte->rte_index;
323 		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
324 	}
325 }
326 
327 static void
328 unmask_irq (unsigned int irq)
329 {
330 	u32 low32;
331 	int rte_index;
332 	struct iosapic_rte_info *rte;
333 
334 	if (list_empty(&iosapic_intr_info[irq].rtes))
335 		return;			/* not an IOSAPIC interrupt! */
336 
337 	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
338 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
339 		rte_index = rte->rte_index;
340 		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
341 	}
342 }
343 
344 
345 static void
346 iosapic_set_affinity (unsigned int irq, cpumask_t mask)
347 {
348 #ifdef CONFIG_SMP
349 	u32 high32, low32;
350 	int dest, rte_index;
351 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
352 	struct iosapic_rte_info *rte;
353 	struct iosapic *iosapic;
354 
355 	irq &= (~IA64_IRQ_REDIRECTED);
356 
357 	cpus_and(mask, mask, cpu_online_map);
358 	if (cpus_empty(mask))
359 		return;
360 
361 	if (reassign_irq_vector(irq, first_cpu(mask)))
362 		return;
363 
364 	dest = cpu_physical_id(first_cpu(mask));
365 
366 	if (list_empty(&iosapic_intr_info[irq].rtes))
367 		return;			/* not an IOSAPIC interrupt */
368 
369 	set_irq_affinity_info(irq, dest, redir);
370 
371 	/* dest contains both id and eid */
372 	high32 = dest << IOSAPIC_DEST_SHIFT;
373 
374 	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
375 	if (redir)
376 		/* change delivery mode to lowest priority */
377 		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
378 	else
379 		/* change delivery mode to fixed */
380 		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
381 	low32 &= IOSAPIC_VECTOR_MASK;
382 	low32 |= irq_to_vector(irq);
383 
384 	iosapic_intr_info[irq].low32 = low32;
385 	iosapic_intr_info[irq].dest = dest;
386 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
387 		iosapic = rte->iosapic;
388 		rte_index = rte->rte_index;
389 		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
390 		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
391 	}
392 #endif
393 }
394 
395 /*
396  * Handlers for level-triggered interrupts.
397  */
398 
399 static unsigned int
400 iosapic_startup_level_irq (unsigned int irq)
401 {
402 	unmask_irq(irq);
403 	return 0;
404 }
405 
406 static void
407 iosapic_end_level_irq (unsigned int irq)
408 {
409 	ia64_vector vec = irq_to_vector(irq);
410 	struct iosapic_rte_info *rte;
411 	int do_unmask_irq = 0;
412 
413 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
414 		do_unmask_irq = 1;
415 		mask_irq(irq);
416 	}
417 
418 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
419 		iosapic_eoi(rte->iosapic->addr, vec);
420 
421 	if (unlikely(do_unmask_irq)) {
422 		move_masked_irq(irq);
423 		unmask_irq(irq);
424 	}
425 }
426 
427 #define iosapic_shutdown_level_irq	mask_irq
428 #define iosapic_enable_level_irq	unmask_irq
429 #define iosapic_disable_level_irq	mask_irq
430 #define iosapic_ack_level_irq		nop
431 
432 struct irq_chip irq_type_iosapic_level = {
433 	.name =		"IO-SAPIC-level",
434 	.startup =	iosapic_startup_level_irq,
435 	.shutdown =	iosapic_shutdown_level_irq,
436 	.enable =	iosapic_enable_level_irq,
437 	.disable =	iosapic_disable_level_irq,
438 	.ack =		iosapic_ack_level_irq,
439 	.end =		iosapic_end_level_irq,
440 	.mask =		mask_irq,
441 	.unmask =	unmask_irq,
442 	.set_affinity =	iosapic_set_affinity
443 };
444 
445 /*
446  * Handlers for edge-triggered interrupts.
447  */
448 
449 static unsigned int
450 iosapic_startup_edge_irq (unsigned int irq)
451 {
452 	unmask_irq(irq);
453 	/*
454 	 * The IOSAPIC simply drops interrupts that arrive while the
455 	 * corresponding pin is masked, so we can't know if an
456 	 * interrupt is pending already.  Let's hope not...
457 	 */
458 	return 0;
459 }
460 
461 static void
462 iosapic_ack_edge_irq (unsigned int irq)
463 {
464 	irq_desc_t *idesc = irq_desc + irq;
465 
466 	move_native_irq(irq);
467 	/*
468 	 * Once we have recorded IRQ_PENDING already, we can mask the
469 	 * interrupt for real. This prevents IRQ storms from unhandled
470 	 * devices.
471 	 */
472 	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
473 	    (IRQ_PENDING|IRQ_DISABLED))
474 		mask_irq(irq);
475 }
476 
477 #define iosapic_enable_edge_irq		unmask_irq
478 #define iosapic_disable_edge_irq	nop
479 #define iosapic_end_edge_irq		nop
480 
481 struct irq_chip irq_type_iosapic_edge = {
482 	.name =		"IO-SAPIC-edge",
483 	.startup =	iosapic_startup_edge_irq,
484 	.shutdown =	iosapic_disable_edge_irq,
485 	.enable =	iosapic_enable_edge_irq,
486 	.disable =	iosapic_disable_edge_irq,
487 	.ack =		iosapic_ack_edge_irq,
488 	.end =		iosapic_end_edge_irq,
489 	.mask =		mask_irq,
490 	.unmask =	unmask_irq,
491 	.set_affinity =	iosapic_set_affinity
492 };
493 
494 unsigned int
495 iosapic_version (char __iomem *addr)
496 {
497 	/*
498 	 * The IOSAPIC Version Register returns a 32-bit structure like:
499 	 * {
500 	 *	unsigned int version   : 8;
501 	 *	unsigned int reserved1 : 8;
502 	 *	unsigned int max_redir : 8;
503 	 *	unsigned int reserved2 : 8;
504 	 * }
505 	 */
506 	return __iosapic_read(addr, IOSAPIC_VERSION);
507 }
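
/*
 * A minimal decoding sketch for the value returned above (see the field
 * layout in the comment inside iosapic_version()):
 *
 *	unsigned int ver = iosapic_version(addr);
 *	unsigned int version   = ver & 0xff;
 *	unsigned int max_redir = (ver >> 16) & 0xff;	// highest RTE index
 *	// the number of RTEs is max_redir + 1, as computed in iosapic_init()
 */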
508 
509 static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
510 {
511 	int i, irq = -ENOSPC, min_count = -1;
512 	struct iosapic_intr_info *info;
513 
514 	/*
515 	 * shared vectors for edge-triggered interrupts are not
516 	 * supported yet
517 	 */
518 	if (trigger == IOSAPIC_EDGE)
519 		return -EINVAL;
520 
521 	for (i = 0; i < NR_IRQS; i++) {
522 		info = &iosapic_intr_info[i];
523 		if (info->trigger == trigger && info->polarity == pol &&
524 		    (info->dmode == IOSAPIC_FIXED ||
525 		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
526 		    can_request_irq(i, IRQF_SHARED)) {
527 			if (min_count == -1 || info->count < min_count) {
528 				irq = i;
529 				min_count = info->count;
530 			}
531 		}
532 	}
533 	return irq;
534 }
535 
536 /*
537  * If the given vector is already owned by another interrupt, assign a new
538  * vector to that interrupt and make this vector available.
539  */
540 static void __init
541 iosapic_reassign_vector (int irq)
542 {
543 	int new_irq;
544 
545 	if (!list_empty(&iosapic_intr_info[irq].rtes)) {
546 		new_irq = create_irq();
547 		if (new_irq < 0)
548 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
549 		printk(KERN_INFO "Reassigning vector %d to %d\n",
550 		       irq_to_vector(irq), irq_to_vector(new_irq));
551 		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
552 		       sizeof(struct iosapic_intr_info));
553 		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
554 		list_move(iosapic_intr_info[irq].rtes.next,
555 			  &iosapic_intr_info[new_irq].rtes);
556 		memset(&iosapic_intr_info[irq], 0,
557 		       sizeof(struct iosapic_intr_info));
558 		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
559 		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
560 	}
561 }
562 
563 static struct iosapic_rte_info *iosapic_alloc_rte (void)
564 {
565 	int i;
566 	struct iosapic_rte_info *rte;
567 	int preallocated = 0;
568 
569 	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
570 		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
571 				    NR_PREALLOCATE_RTE_ENTRIES);
572 		if (!rte)
573 			return NULL;
574 		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
575 			list_add(&rte->rte_list, &free_rte_list);
576 	}
577 
578 	if (!list_empty(&free_rte_list)) {
579 		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
580 				 rte_list);
581 		list_del(&rte->rte_list);
582 		preallocated++;
583 	} else {
584 		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
585 		if (!rte)
586 			return NULL;
587 	}
588 
589 	memset(rte, 0, sizeof(struct iosapic_rte_info));
590 	if (preallocated)
591 		rte->flags |= RTE_PREALLOCATED;
592 
593 	return rte;
594 }
595 
596 static inline int irq_is_shared (int irq)
597 {
598 	return (iosapic_intr_info[irq].count > 1);
599 }
600 
601 static int
602 register_intr (unsigned int gsi, int irq, unsigned char delivery,
603 	       unsigned long polarity, unsigned long trigger)
604 {
605 	irq_desc_t *idesc;
606 	struct hw_interrupt_type *irq_type;
607 	int index;
608 	struct iosapic_rte_info *rte;
609 
610 	index = find_iosapic(gsi);
611 	if (index < 0) {
612 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
613 		       __FUNCTION__, gsi);
614 		return -ENODEV;
615 	}
616 
617 	rte = find_rte(irq, gsi);
618 	if (!rte) {
619 		rte = iosapic_alloc_rte();
620 		if (!rte) {
621 			printk(KERN_WARNING "%s: cannot allocate memory\n",
622 			       __FUNCTION__);
623 			return -ENOMEM;
624 		}
625 
626 		rte->iosapic	= &iosapic_lists[index];
627 		rte->rte_index	= gsi - rte->iosapic->gsi_base;
628 		rte->refcnt++;
629 		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
630 		iosapic_intr_info[irq].count++;
631 		iosapic_lists[index].rtes_inuse++;
632 	}
633 	else if (rte->refcnt == NO_REF_RTE) {
634 		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
635 		if (info->count > 0 &&
636 		    (info->trigger != trigger || info->polarity != polarity)){
637 			printk (KERN_WARNING
638 				"%s: cannot override the interrupt\n",
639 				__FUNCTION__);
640 			return -EINVAL;
641 		}
642 		rte->refcnt++;
643 		iosapic_intr_info[irq].count++;
644 		iosapic_lists[index].rtes_inuse++;
645 	}
646 
647 	iosapic_intr_info[irq].polarity = polarity;
648 	iosapic_intr_info[irq].dmode    = delivery;
649 	iosapic_intr_info[irq].trigger  = trigger;
650 
651 	if (trigger == IOSAPIC_EDGE)
652 		irq_type = &irq_type_iosapic_edge;
653 	else
654 		irq_type = &irq_type_iosapic_level;
655 
656 	idesc = irq_desc + irq;
657 	if (idesc->chip != irq_type) {
658 		if (idesc->chip != &no_irq_type)
659 			printk(KERN_WARNING
660 			       "%s: changing vector %d from %s to %s\n",
661 			       __FUNCTION__, irq_to_vector(irq),
662 			       idesc->chip->name, irq_type->name);
663 		idesc->chip = irq_type;
664 	}
665 	return 0;
666 }
667 
668 static unsigned int
669 get_target_cpu (unsigned int gsi, int irq)
670 {
671 #ifdef CONFIG_SMP
672 	static int cpu = -1;
673 	extern int cpe_vector;
674 	cpumask_t domain = irq_to_domain(irq);
675 
676 	/*
677 	 * If the vector is shared by multiple RTEs, all RTEs that
678 	 * share it must use the same destination CPU.
679 	 */
680 	if (!list_empty(&iosapic_intr_info[irq].rtes))
681 		return iosapic_intr_info[irq].dest;
682 
683 	/*
684 	 * If the platform supports redirection via XTP, let it
685 	 * distribute interrupts.
686 	 */
687 	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
688 		return cpu_physical_id(smp_processor_id());
689 
690 	/*
691 	 * Some interrupts (ACPI SCI, for instance) are registered
692 	 * before the BSP is marked as online.
693 	 */
694 	if (!cpu_online(smp_processor_id()))
695 		return cpu_physical_id(smp_processor_id());
696 
697 #ifdef CONFIG_ACPI
698 	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
699 		return get_cpei_target_cpu();
700 #endif
701 
702 #ifdef CONFIG_NUMA
703 	{
704 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
705 		cpumask_t cpu_mask;
706 
707 		iosapic_index = find_iosapic(gsi);
708 		if (iosapic_index < 0 ||
709 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
710 			goto skip_numa_setup;
711 
712 		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
713 		cpus_and(cpu_mask, cpu_mask, domain);
714 		for_each_cpu_mask(numa_cpu, cpu_mask) {
715 			if (!cpu_online(numa_cpu))
716 				cpu_clear(numa_cpu, cpu_mask);
717 		}
718 
719 		num_cpus = cpus_weight(cpu_mask);
720 
721 		if (!num_cpus)
722 			goto skip_numa_setup;
723 
724 		/* Use irq assignment to distribute across cpus in node */
725 		cpu_index = irq % num_cpus;
726 
727 		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
728 			numa_cpu = next_cpu(numa_cpu, cpu_mask);
729 
730 		if (numa_cpu != NR_CPUS)
731 			return cpu_physical_id(numa_cpu);
732 	}
733 skip_numa_setup:
734 #endif
735 	/*
736 	 * Otherwise, round-robin interrupt vectors across all the
737 	 * processors.  (It'd be nice if we could be smarter in the
738 	 * case of NUMA.)
739 	 */
740 	do {
741 		if (++cpu >= NR_CPUS)
742 			cpu = 0;
743 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
744 
745 	return cpu_physical_id(cpu);
746 #else  /* CONFIG_SMP */
747 	return cpu_physical_id(smp_processor_id());
748 #endif
749 }
750 
751 /*
752  * ACPI can describe IOSAPIC interrupts via static tables and namespace
753  * methods.  This provides an interface to register those interrupts and
754  * program the IOSAPIC RTE.
755  */
756 int
757 iosapic_register_intr (unsigned int gsi,
758 		       unsigned long polarity, unsigned long trigger)
759 {
760 	int irq, mask = 1, err;
761 	unsigned int dest;
762 	unsigned long flags;
763 	struct iosapic_rte_info *rte;
764 	u32 low32;
765 
766 	/*
767 	 * If this GSI has already been registered (i.e., it's a
768 	 * shared interrupt, or we lost a race to register it),
769 	 * don't touch the RTE.
770 	 */
771 	spin_lock_irqsave(&iosapic_lock, flags);
772 	irq = __gsi_to_irq(gsi);
773 	if (irq > 0) {
774 		rte = find_rte(irq, gsi);
775 		if (iosapic_intr_info[irq].count == 0) {
776 			assign_irq_vector(irq);
777 			dynamic_irq_init(irq);
778 		} else if (rte->refcnt != NO_REF_RTE) {
779 			rte->refcnt++;
780 			goto unlock_iosapic_lock;
781 		}
782 	} else
783 		irq = create_irq();
784 
785 	/* If we have run out of vectors, try to find a sharable one */
786 	if (irq < 0) {
787 		irq = iosapic_find_sharable_irq(trigger, polarity);
788 		if (irq < 0)
789 			goto unlock_iosapic_lock;
790 	}
791 
792 	spin_lock(&irq_desc[irq].lock);
793 	dest = get_target_cpu(gsi, irq);
794 	err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
795 			    polarity, trigger);
796 	if (err < 0) {
797 		irq = err;
798 		goto unlock_all;
799 	}
800 
801 	/*
802 	 * If the vector is shared and already unmasked for other
803 	 * interrupt sources, don't mask it.
804 	 */
805 	low32 = iosapic_intr_info[irq].low32;
806 	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
807 		mask = 0;
808 	set_rte(gsi, irq, dest, mask);
809 
810 	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
811 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
812 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
813 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
814  unlock_all:
815 	spin_unlock(&irq_desc[irq].lock);
816  unlock_iosapic_lock:
817 	spin_unlock_irqrestore(&iosapic_lock, flags);
818 	return irq;
819 }
820 
821 void
822 iosapic_unregister_intr (unsigned int gsi)
823 {
824 	unsigned long flags;
825 	int irq, index;
826 	irq_desc_t *idesc;
827 	u32 low32;
828 	unsigned long trigger, polarity;
829 	unsigned int dest;
830 	struct iosapic_rte_info *rte;
831 
832 	/*
833 	 * If no irq is associated with the gsi, this call to
834 	 * iosapic_unregister_intr() is unbalanced.  We need to check
835 	 * this again after taking the lock.
836 	 */
837 	irq = gsi_to_irq(gsi);
838 	if (irq < 0) {
839 		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
840 		       gsi);
841 		WARN_ON(1);
842 		return;
843 	}
844 
845 	spin_lock_irqsave(&iosapic_lock, flags);
846 	if ((rte = find_rte(irq, gsi)) == NULL) {
847 		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
848 		       gsi);
849 		WARN_ON(1);
850 		goto out;
851 	}
852 
853 	if (--rte->refcnt > 0)
854 		goto out;
855 
856 	idesc = irq_desc + irq;
857 	rte->refcnt = NO_REF_RTE;
858 
859 	/* Mask the interrupt */
860 	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
861 	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
862 
863 	iosapic_intr_info[irq].count--;
864 	index = find_iosapic(gsi);
865 	iosapic_lists[index].rtes_inuse--;
866 	WARN_ON(iosapic_lists[index].rtes_inuse < 0);
867 
868 	trigger  = iosapic_intr_info[irq].trigger;
869 	polarity = iosapic_intr_info[irq].polarity;
870 	dest     = iosapic_intr_info[irq].dest;
871 	printk(KERN_INFO
872 	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
873 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
874 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
875 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
876 
877 	if (iosapic_intr_info[irq].count == 0) {
878 #ifdef CONFIG_SMP
879 		/* Clear affinity */
880 		cpus_setall(idesc->affinity);
881 #endif
882 		/* Clear the interrupt information */
883 		iosapic_intr_info[irq].dest = 0;
884 		iosapic_intr_info[irq].dmode = 0;
885 		iosapic_intr_info[irq].polarity = 0;
886 		iosapic_intr_info[irq].trigger = 0;
887 		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
888 
889 		/* Destroy and reserve IRQ */
890 		destroy_and_reserve_irq(irq);
891 	}
892  out:
893 	spin_unlock_irqrestore(&iosapic_lock, flags);
894 }
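
/*
 * Illustrative pairing (a sketch, not a call site in this file): callers
 * are expected to balance each successful iosapic_register_intr() with one
 * iosapic_unregister_intr() for the same GSI, e.g. in a hypothetical
 * hot-remove path:
 *
 *	int irq = iosapic_register_intr(gsi, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
 *	...				// driver does request_irq()/free_irq()
 *	iosapic_unregister_intr(gsi);
 */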
895 
896 /*
897  * ACPI calls this when it finds an entry for a platform interrupt.
898  */
899 int __init
900 iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
901 				int iosapic_vector, u16 eid, u16 id,
902 				unsigned long polarity, unsigned long trigger)
903 {
904 	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
905 	unsigned char delivery;
906 	int irq, vector, mask = 0;
907 	unsigned int dest = ((id << 8) | eid) & 0xffff;
908 
909 	switch (int_type) {
910 	      case ACPI_INTERRUPT_PMI:
911 		irq = vector = iosapic_vector;
912 		bind_irq_vector(irq, vector, CPU_MASK_ALL);
913 		/*
914 		 * Since the PMI vector is allocated by firmware (ACPI), not
915 		 * by the kernel, we need to make sure the vector is available.
916 		 */
917 		iosapic_reassign_vector(irq);
918 		delivery = IOSAPIC_PMI;
919 		break;
920 	      case ACPI_INTERRUPT_INIT:
921 		irq = create_irq();
922 		if (irq < 0)
923 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
924 		vector = irq_to_vector(irq);
925 		delivery = IOSAPIC_INIT;
926 		break;
927 	      case ACPI_INTERRUPT_CPEI:
928 		irq = vector = IA64_CPE_VECTOR;
929 		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
930 		delivery = IOSAPIC_LOWEST_PRIORITY;
931 		mask = 1;
932 		break;
933 	      default:
934 		printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__,
935 		       int_type);
936 		return -1;
937 	}
938 
939 	register_intr(gsi, irq, delivery, polarity, trigger);
940 
941 	printk(KERN_INFO
942 	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
943 	       " vector %d\n",
944 	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
945 	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
946 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
947 	       cpu_logical_id(dest), dest, vector);
948 
949 	set_rte(gsi, irq, dest, mask);
950 	return vector;
951 }
952 
953 /*
954  * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
955  */
956 void __devinit
957 iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
958 			  unsigned long polarity,
959 			  unsigned long trigger)
960 {
961 	int vector, irq;
962 	unsigned int dest = cpu_physical_id(smp_processor_id());
963 
964 	irq = vector = isa_irq_to_vector(isa_irq);
965 	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
966 	register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
967 
968 	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
969 	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
970 	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
971 	    cpu_logical_id(dest), dest, vector);
972 
973 	set_rte(gsi, irq, dest, 1);
974 }
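
/*
 * Illustrative example (values are typical of an ACPI Interrupt Source
 * Override, not taken from this file): routing the ISA timer (IRQ 0) to
 * GSI 2 as an edge-triggered, active-high input would be applied as:
 *
 *	iosapic_override_isa_irq(0, 2, IOSAPIC_POL_HIGH, IOSAPIC_EDGE);
 */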
975 
976 void __init
977 iosapic_system_init (int system_pcat_compat)
978 {
979 	int irq;
980 
981 	for (irq = 0; irq < NR_IRQS; ++irq) {
982 		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
983 		/* mark as unused */
984 		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
985 
986 		iosapic_intr_info[irq].count = 0;
987 	}
988 
989 	pcat_compat = system_pcat_compat;
990 	if (pcat_compat) {
991 		/*
992 		 * Disable the compatibility mode interrupts (8259 style);
993 		 * this requires I/O port (IN/OUT) support to be enabled.
994 		 */
995 		printk(KERN_INFO
996 		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
997 		       __FUNCTION__);
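		/*
		 * 0x21 and 0xA1 are the interrupt mask registers of the
		 * master and slave 8259; writing 0xff masks all eight
		 * inputs on each controller.
		 */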
998 		outb(0xff, 0xA1);
999 		outb(0xff, 0x21);
1000 	}
1001 }
1002 
1003 static inline int
1004 iosapic_alloc (void)
1005 {
1006 	int index;
1007 
1008 	for (index = 0; index < NR_IOSAPICS; index++)
1009 		if (!iosapic_lists[index].addr)
1010 			return index;
1011 
1012 	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__);
1013 	return -1;
1014 }
1015 
1016 static inline void
1017 iosapic_free (int index)
1018 {
1019 	memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
1020 }
1021 
1022 static inline int
1023 iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
1024 {
1025 	int index;
1026 	unsigned int gsi_end, base, end;
1027 
1028 	/* check gsi range */
1029 	gsi_end = gsi_base + ((ver >> 16) & 0xff);
1030 	for (index = 0; index < NR_IOSAPICS; index++) {
1031 		if (!iosapic_lists[index].addr)
1032 			continue;
1033 
1034 		base = iosapic_lists[index].gsi_base;
1035 		end  = base + iosapic_lists[index].num_rte - 1;
1036 
1037 		if (gsi_end < base || end < gsi_base)
1038 			continue; /* OK */
1039 
1040 		return -EBUSY;
1041 	}
1042 	return 0;
1043 }
1044 
1045 int __devinit
1046 iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
1047 {
1048 	int num_rte, err, index;
1049 	unsigned int isa_irq, ver;
1050 	char __iomem *addr;
1051 	unsigned long flags;
1052 
1053 	spin_lock_irqsave(&iosapic_lock, flags);
1054 	index = find_iosapic(gsi_base);
1055 	if (index >= 0) {
1056 		spin_unlock_irqrestore(&iosapic_lock, flags);
1057 		return -EBUSY;
1058 	}
1059 
1060 	addr = ioremap(phys_addr, 0);
1061 	ver = iosapic_version(addr);
1062 	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
1063 		iounmap(addr);
1064 		spin_unlock_irqrestore(&iosapic_lock, flags);
1065 		return err;
1066 	}
1067 
1068 	/*
1069 	 * The MAX_REDIR register holds the highest input pin number
1070 	 * (starting from 0).  We add 1 so that we can use it for
1071 	 * (starting from 0).  We add 1 so that we can use it as the
1072 	 * number of pins (= RTEs).
1073 	num_rte = ((ver >> 16) & 0xff) + 1;
1074 
1075 	index = iosapic_alloc();
1076 	iosapic_lists[index].addr = addr;
1077 	iosapic_lists[index].gsi_base = gsi_base;
1078 	iosapic_lists[index].num_rte = num_rte;
1079 #ifdef CONFIG_NUMA
1080 	iosapic_lists[index].node = MAX_NUMNODES;
1081 #endif
1082 	spin_lock_init(&iosapic_lists[index].lock);
1083 	spin_unlock_irqrestore(&iosapic_lock, flags);
1084 
1085 	if ((gsi_base == 0) && pcat_compat) {
1086 		/*
1087 		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
1088 		 * these may get reprogrammed later on with data from the ACPI
1089 		 * Interrupt Source Override table.
1090 		 */
1091 		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
1092 			iosapic_override_isa_irq(isa_irq, isa_irq,
1093 						 IOSAPIC_POL_HIGH,
1094 						 IOSAPIC_EDGE);
1095 	}
1096 	return 0;
1097 }
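
/*
 * Illustrative call site (a sketch; the MADT structure and field names are
 * assumptions, see the ACPI parsing code for the real ones): each I/O SAPIC
 * entry found in the ACPI MADT would be registered roughly like this:
 *
 *	struct acpi_madt_io_sapic *s = ...;	// one MADT I/O SAPIC entry
 *	if (iosapic_init(s->address, s->global_irq_base))
 *		printk(KERN_WARNING "I/O SAPIC at 0x%lx not registered\n",
 *		       (unsigned long) s->address);
 */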
1098 
1099 #ifdef CONFIG_HOTPLUG
1100 int
1101 iosapic_remove (unsigned int gsi_base)
1102 {
1103 	int index, err = 0;
1104 	unsigned long flags;
1105 
1106 	spin_lock_irqsave(&iosapic_lock, flags);
1107 	index = find_iosapic(gsi_base);
1108 	if (index < 0) {
1109 		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
1110 		       __FUNCTION__, gsi_base);
1111 		goto out;
1112 	}
1113 
1114 	if (iosapic_lists[index].rtes_inuse) {
1115 		err = -EBUSY;
1116 		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
1117 		       __FUNCTION__, gsi_base);
1118 		goto out;
1119 	}
1120 
1121 	iounmap(iosapic_lists[index].addr);
1122 	iosapic_free(index);
1123  out:
1124 	spin_unlock_irqrestore(&iosapic_lock, flags);
1125 	return err;
1126 }
1127 #endif /* CONFIG_HOTPLUG */
1128 
1129 #ifdef CONFIG_NUMA
1130 void __devinit
1131 map_iosapic_to_node(unsigned int gsi_base, int node)
1132 {
1133 	int index;
1134 
1135 	index = find_iosapic(gsi_base);
1136 	if (index < 0) {
1137 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
1138 		       __FUNCTION__, gsi_base);
1139 		return;
1140 	}
1141 	iosapic_lists[index].node = node;
1142 	return;
1143 }
1144 #endif
1145 
1146 static int __init iosapic_enable_kmalloc (void)
1147 {
1148 	iosapic_kmalloc_ok = 1;
1149 	return 0;
1150 }
1151 core_initcall (iosapic_enable_kmalloc);
1152