/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets at least 1024 event channels (more with the FIFO ABI),
 * but NR_IRQS is not that large, we must dynamically map
 * irqs<->event channels.  The event channels interface with the rest
 * of the kernel by defining a xen interrupt chip.  When an event is
 * received, it is mapped to an irq and sent through the normal
 * interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/xen/page.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))

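/*
 * Worked example (assuming a typical configuration with 4 KiB pages and
 * 4-byte ints): EVTCHN_PER_ROW is 4096 / 4 = 1024, so event channel 1234
 * lands in row 1234 / 1024 = 1, column 1234 % 1024 = 210 of the
 * two-level evtchn_to_irq table.  Rows are page-sized and allocated on
 * demand by set_evtchn_to_irq() below.
 */
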
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[row][col] = irq;
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
	unsigned int evtchn;

	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
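
/*
 * Illustrative usage (not taken from this file): a ring-based frontend
 * typically pushes requests and then kicks the backend, e.g.
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 *
 * where "ring" and "irq" are the driver's own state.
 */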

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	/* Closed ports are implicitly re-bound to VCPU0. */
	bind_evtchn_to_cpu(port, 0);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			pr_info("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc != 0) {
		pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
		       irq, rc);
		xen_evtchn_close(evtchn);
		return 0;
	}
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq has actually
 * been started up.  Return an existing irq if we've already got one
 * for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

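/*
 * Illustrative call sequence (placeholder names and values, not taken
 * from a real caller): privileged-domain code binding a shareable,
 * level-triggered GSI might do
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, 1, "mydev");
 *	if (irq >= 0)
 *		rc = request_irq(irq, mydev_isr, IRQF_SHARED, "mydev", dev);
 *
 * where the non-zero third argument selects PIRQ_SHAREABLE and hence
 * the fasteoi flow configured above.
 */
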
#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, const char *name, domid_t domid)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq < 0)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, 0, domid, 0);
	if (ret < 0)
		goto error_irq;
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if (rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

 out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

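/*
 * Scan all ports (via EVTCHNOP_status) for one already bound to @virq on
 * @cpu.  Used by bind_virq_to_irq() when EVTCHNOP_bind_virq reports that
 * the binding already exists, e.g. after a pv-on-hvm kexec.
 */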
static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
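
/*
 * Illustrative usage (placeholder names): a frontend that has learned the
 * backend's event channel port from xenstore would typically do
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_frontend_interrupt,
 *					0, "my-frontend", info);
 *	if (irq < 0)
 *		return irq;
 *
 * and later tear the binding down with unbind_from_irqhandler(irq, info).
 */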

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
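
/*
 * Illustrative usage: the Xen clocksource code binds each CPU's timer
 * VIRQ roughly like
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU | IRQF_NOBALANCING,
 *				      "timer", NULL);
 *
 * (flags simplified here); VIRQs are per-cpu, so each CPU binds its own
 * instance.
 */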

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
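
/*
 * Note: only the FIFO-based event channel ABI implements priorities;
 * with the 2-level ABI the hypercall fails and an error is returned.
 * A caller wanting maximum priority would do e.g.
 *
 *	xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 */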

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
 done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
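
/*
 * Refcounting sketch (behaviour follows from __unbind_from_irq() above):
 * evtchn_make_refcounted() starts the count at 1, evtchn_get() bumps it,
 * and evtchn_put() drops it via unbind_from_irq(), which only closes the
 * channel once the count reaches zero.  For example:
 *
 *	evtchn_make_refcounted(evtchn);		refcnt: 1
 *	evtchn_get(evtchn);			refcnt: 2
 *	evtchn_put(evtchn);			refcnt: 1
 *	evtchn_put(evtchn);			refcnt: 0 -> channel closed
 */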

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
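
/*
 * Reentrancy note for the loop below: a nested upcall (one arriving while
 * xen_evtchn_handle_events() runs with interrupts re-enabled) just bumps
 * xed_nesting_count and returns; the outer invocation then sees a count
 * other than 1 and loops again, so nested events are handled without
 * recursion and without being lost.
 */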

static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = get_cpu();
	unsigned count;

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		xen_evtchn_handle_events(cpu);

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

static int retrigger_evtchn(int evtchn)
{
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	masked = test_and_set_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

int resend_irq_on_evtchn(unsigned int irq)
{
	return retrigger_evtchn(evtchn_from_irq(irq));
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	return retrigger_evtchn(evtchn_from_irq(data->irq));
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

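/*
 * Illustrative polling pattern (simplified from the pv spinlock code;
 * "condition" is a placeholder): to wait for an event without taking
 * the interrupt, a CPU can do
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition)
 *		xen_poll_irq(irq);
 *
 * xen_poll_irq() blocks in the hypervisor until the (normally disabled)
 * irq's event channel becomes pending, so no interrupt is delivered.
 */
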
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_mask_all();
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);
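
/*
 * With the "xen." prefix above, booting with xen.fifo_events=0 forces
 * the 2-level event channel ABI even when the hypervisor also offers
 * the FIFO-based ABI (see xen_init_IRQ() below).
 */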

void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	xen_evtchn_mask_all();

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		/* TODO: No PVH support for PIRQ EOI */
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}