/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *						Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
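
/*
 * Reading the table above: ISA IRQ 0 (the timer) maps to vector 0x2f, the
 * highest-numbered (and thus highest-priority) vector in this block, while
 * ISA IRQ 1 (the keyboard) maps to 0x20, the lowest.  On IA-64, interrupt
 * priority grows with the vector number.
 */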

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};
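
/*
 * How the three tables relate (illustrative, hypothetical numbers): if
 * __bind_irq_vector() binds irq 10 to vector 0x32 with domain {cpu0, cpu1},
 * then afterwards
 *
 *	irq_cfg[10].vector == 0x32, irq_cfg[10].domain == {cpu0, cpu1},
 *	per_cpu(vector_irq, 0)[0x32] == 10 (likewise on cpu1), and
 *	vector_table[0x32] includes {cpu0, cpu1}.
 *
 * vector_irq gives the per-CPU vector -> irq lookup used at interrupt time;
 * vector_table records which CPUs already use a vector, so allocation can
 * avoid collisions within a domain.
 */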

int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
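
/*
 * In other words, a vector is free within a domain iff no CPU in that
 * domain already uses it.  E.g. (hypothetical): if vector 0x30 is in use
 * only on cpu2, it is still a valid answer for domain {cpu0, cpu1}, but
 * not for domain {cpu1, cpu2}.
 */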

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}
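
/*
 * Note that with irq == AUTO_ASSIGN the caller gets an identity mapping:
 * the allocated vector number doubles as the irq number, which is why the
 * function returns the vector rather than the irq.
 */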

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}
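
/*
 * The !! above folds any bind_irq_vector() error code into 1, so
 * reserve_irq_vector() returns 0 on success and 1 on failure.
 */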

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
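
/*
 * The migration protocol above, step by step: irq_prepare_move() allocates
 * a vector in the target CPU's domain and remembers the previous domain in
 * cfg->old_domain.  Once the irq actually fires outside the old domain,
 * irq_complete_move() sends IA64_IRQ_MOVE_VECTOR IPIs to the still-online
 * CPUs of the old domain; each of them then runs the cleanup handler below
 * to drop its stale vector_irq entry and vector_table bit.
 */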

static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.flags =	IRQF_DISABLED,
	.name =		"irq_move"
};

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
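
/*
 * Usage (a sketch): booting with "vector=percpu" on the kernel command
 * line selects VECTOR_DOMAIN_PERCPU, so vector_allocation_domain() hands
 * out a single-CPU domain per allocation (each CPU gets its own vector
 * space) instead of CPU_MASK_ALL; it also sets no_int_routing.
 */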
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic IRQ allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	clear_irq_vector(irq);
}
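
/*
 * Typical caller pattern (a sketch with a hypothetical handler; error
 * handling trimmed):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_msi_handler, 0, "my-msi", dev)) {
 *		destroy_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, dev);
 *	destroy_irq(irq);
 */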

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * From here we dispatch to the correct hardware IRQ handler via a
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
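	/*
	 * The arithmetic behind those numbers (to the best of our reading
	 * of the architecture): cr.tpr masks all interrupts whose priority
	 * class is at or below the one written to it, and the 256 vectors
	 * are grouped into 16 priority classes of 16 vectors each.  Writing
	 * the in-service vector to TPR therefore allows at most one nested
	 * interrupt per higher priority class (~16 deep), instead of one
	 * per remaining device vector (~240 deep).
	 */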
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal IRQ dispatch path, just as it
			 * would have been called from a real interrupt
			 * handler.  We pass NULL for pt_regs since there
			 * is no interrupted context here; this code could
			 * probably be shared with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
}

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};

/*
 * KVM uses this interrupt to force a CPU out of guest mode.
 */
static struct irqaction resched_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"resched"
};

static struct irqaction tlb_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"tlb_flush"
};

#endif

void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (action)
		setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}
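
/*
 * Per-CPU interrupts are registered with an identity mapping (irq == vec)
 * bound across all CPUs (CPU_MASK_ALL), so every CPU resolves the vector
 * to the same irq number.
 */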

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The physical CPU number is encoded as an 8-bit ID and an
	 * 8-bit EID.
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
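
	/*
	 * Worked example with hypothetical values: for phys_cpu_id 0x0102
	 * (ID 0x01, EID 0x02), vector 0xd0, and redirect 0, the write goes
	 * to ipi_base_addr + (0x0102 << 4) = ipi_base_addr + 0x1020, and
	 * ipi_data is (delivery_mode << 8) | 0xd0.
	 */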

	writeq(ipi_data, ipi_addr);
}
689