xref: /openbmc/linux/drivers/xen/events/events_base.c (revision 3932b9ca)
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a Xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/xen/page.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
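
/*
 * evtchn_to_irq is a sparse two-level table: a top-level array of
 * page-sized rows that set_evtchn_to_irq() allocates on demand.  As an
 * illustrative worked example (assuming 4 KiB pages and 4-byte ints,
 * so EVTCHN_PER_ROW == 1024), event channel 2050 lands in row 2,
 * column 2:
 *
 *	EVTCHN_ROW(2050) == 2050 / 1024 == 2
 *	EVTCHN_COL(2050) == 2050 % 1024 == 2
 */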

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
	unsigned int evtchn;

	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
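
/*
 * Illustrative sketch (not part of this file): a ring-based driver
 * typically kicks the remote end only when the ring macros say a
 * notification is needed, e.g.:
 *
 *	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 *
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() is the shared-ring macro from
 * <xen/interface/io/ring.h>; "ring" and "irq" here stand for assumed
 * driver state.
 */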

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc != 0) {
		pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
		       irq, rc);
		xen_evtchn_close(evtchn);
		return 0;
	}
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq has actually
 * been started up.  Return an existing irq if we've already got one
 * for the gsi.
 *
 * Shareable implies level triggered; not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For unprivileged
	 * domains, the pcifront driver provides a PCI bus whose backend
	 * in the privileged domain makes this call on their behalf. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupt.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	for (; i >= 0; i--)
		__unbind_from_irq(irq + i);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * Only unmap the PIRQ for the first vector of an MSI group;
	 * vectors other than the first are flagged PIRQ_MSI_GROUP and
	 * skip the unmap here.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;
	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
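
/*
 * Usage sketch (illustrative only; my_handler, my_dev and the evtchn
 * value stand for assumed driver state): a frontend that learned the
 * remote event channel from xenstore would bind it like so:
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (err < 0)
 *		return err;
 *	my_dev->irq = err;
 *
 * The returned irq is later released with unbind_from_irqhandler().
 */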

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
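
/*
 * For example (hedged sketch, an approximation of the Xen timer code
 * rather than a quote of it), the per-cpu timer VIRQ is bound roughly
 * like this:
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU, "timer", NULL);
 *
 * VIRQ_TIMER comes from <xen/interface/xen.h>; the handler name and
 * flags shown are assumptions for illustration.
 */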

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
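
/*
 * Note: EVTCHNOP_set_priority is only honoured by the FIFO-based event
 * channel ABI; with the 2-level ABI the hypercall fails, so callers
 * should treat the priority as a hint.  Illustrative sketch:
 *
 *	if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
 *		pr_info("event priority not supported, using default\n");
 *
 * XEN_IRQ_PRIORITY_MAX is defined in <xen/events.h>.
 */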

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);

	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
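
/*
 * Lifecycle sketch for the refcounting above (as used, for instance,
 * by the /dev/xen/evtchn driver): after binding, evtchn_make_refcounted()
 * switches the irq from the default refcnt of -1 ("not refcounted") to
 * a count of 1; each user then brackets access with evtchn_get() and
 * evtchn_put(), and the final put unbinds the irq.
 */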

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
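
/*
 * xed_nesting_count guards against re-entrant upcalls: if an upcall
 * arrives while one is already being processed on this CPU, the inner
 * invocation only bumps the count and returns, and the outer loop
 * notices (count != 1, or evtchn_upcall_pending set again) and makes
 * another pass instead of recursing.
 */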

static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = get_cpu();
	unsigned count;

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		xen_evtchn_handle_events(cpu);

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
	inc_irq_stat(irq_hv_callback_count);
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	/* New event channels are always bound to CPU 0. */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq(data->irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	masked = test_and_set_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI-based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
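
/*
 * The poll primitives above are used, for instance, by the PV spinlock
 * code.  Roughly (illustrative sketch; condition_already_satisfied()
 * is a placeholder for the caller's wake condition):
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_already_satisfied())
 *		xen_poll_irq(irq);
 *
 * i.e. clear the channel, re-check the condition to avoid a lost
 * wakeup, then block in SCHEDOP_poll until the channel is re-pended.
 */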

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_mask_all();
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);

void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	xen_evtchn_mask_all();

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		/* TODO: No PVH support for PIRQ EOI */
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}
1694