xref: /openbmc/linux/drivers/xen/events/events_base.c (revision ea459e69)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Xen event channels
4  *
5  * Xen models interrupts with abstract event channels.  Because each
6  * domain gets 1024 event channels, but NR_IRQS is not that large, we
7  * must dynamically map irqs<->event channels.  The event channels
8  * interface with the rest of the kernel by defining a xen interrupt
9  * chip.  When an event is received, it is mapped to an irq and sent
10  * through the normal interrupt processing path.
11  *
12  * There are four kinds of events which can be mapped to an event
13  * channel:
14  *
15  * 1. Inter-domain notifications.  This includes all the virtual
16  *    device events, since they're driven by front-ends in another domain
17  *    (typically dom0).
18  * 2. VIRQs, typically used for timers.  These are per-cpu events.
19  * 3. IPIs.
20  * 4. PIRQs - Hardware interrupts.
21  *
22  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
23  */
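/*
 * A rough map (a sketch, not exhaustive) of how the four kinds above reach
 * the IRQ layer through the bind_* entry points defined in this file:
 *
 *   1. Inter-domain  -> bind_evtchn_to_irq() /
 *                       bind_interdomain_evtchn_to_irq_lateeoi()
 *   2. VIRQs         -> bind_virq_to_irq()
 *   3. IPIs          -> bind_ipi_to_irq()
 *   4. PIRQs         -> xen_bind_pirq_gsi_to_irq()
 *
 * All of them funnel into xen_irq_info_common_setup(), which records the
 * evtchn <-> irq mapping consumed by the upcall path.
 */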
24 
25 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
26 
27 #include <linux/linkage.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 #include <linux/moduleparam.h>
31 #include <linux/string.h>
32 #include <linux/memblock.h>
33 #include <linux/slab.h>
34 #include <linux/irqnr.h>
35 #include <linux/pci.h>
36 #include <linux/rcupdate.h>
37 #include <linux/spinlock.h>
38 #include <linux/cpuhotplug.h>
39 #include <linux/atomic.h>
40 #include <linux/ktime.h>
41 
42 #ifdef CONFIG_X86
43 #include <asm/desc.h>
44 #include <asm/ptrace.h>
45 #include <asm/idtentry.h>
46 #include <asm/irq.h>
47 #include <asm/io_apic.h>
48 #include <asm/i8259.h>
49 #include <asm/xen/cpuid.h>
50 #include <asm/xen/pci.h>
51 #endif
52 #include <asm/sync_bitops.h>
53 #include <asm/xen/hypercall.h>
54 #include <asm/xen/hypervisor.h>
55 #include <xen/page.h>
56 
57 #include <xen/xen.h>
58 #include <xen/hvm.h>
59 #include <xen/xen-ops.h>
60 #include <xen/events.h>
61 #include <xen/interface/xen.h>
62 #include <xen/interface/event_channel.h>
63 #include <xen/interface/hvm/hvm_op.h>
64 #include <xen/interface/hvm/params.h>
65 #include <xen/interface/physdev.h>
66 #include <xen/interface/sched.h>
67 #include <xen/interface/vcpu.h>
68 #include <xen/xenbus.h>
69 #include <asm/hw_irq.h>
70 
71 #include "events_internal.h"
72 
73 #undef MODULE_PARAM_PREFIX
74 #define MODULE_PARAM_PREFIX "xen."
75 
76 /* Interrupt types. */
77 enum xen_irq_type {
78 	IRQT_UNBOUND = 0,
79 	IRQT_PIRQ,
80 	IRQT_VIRQ,
81 	IRQT_IPI,
82 	IRQT_EVTCHN
83 };
84 
85 /*
86  * Packed IRQ information:
87  * type - enum xen_irq_type
88  * event channel - irq->event channel mapping
89  * cpu - cpu this event channel is bound to
90  * index - type-specific information:
91  *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
92  *           guest, or GSI (real passthrough IRQ) of the device.
93  *    VIRQ - virq number
94  *    IPI - IPI vector
95  *    EVTCHN - xenbus device of the interdomain channel, if any
96  */
97 struct irq_info {
98 	struct list_head list;
99 	struct list_head eoi_list;
100 	struct rcu_work rwork;
101 	short refcnt;
102 	u8 spurious_cnt;
103 	u8 is_accounted;
104 	short type;		/* type: IRQT_* */
105 	u8 mask_reason;		/* Why is event channel masked */
106 #define EVT_MASK_REASON_EXPLICIT	0x01
107 #define EVT_MASK_REASON_TEMPORARY	0x02
108 #define EVT_MASK_REASON_EOI_PENDING	0x04
109 	u8 is_active;		/* Is event just being handled? */
110 	unsigned irq;
111 	evtchn_port_t evtchn;   /* event channel */
112 	unsigned short cpu;     /* cpu bound */
113 	unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
114 	unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
115 	u64 eoi_time;           /* Time in jiffies when to EOI. */
116 	raw_spinlock_t lock;
117 	bool is_static;           /* Is event channel static */
118 
119 	union {
120 		unsigned short virq;
121 		enum ipi_vector ipi;
122 		struct {
123 			unsigned short pirq;
124 			unsigned short gsi;
125 			unsigned char vector;
126 			unsigned char flags;
127 			uint16_t domid;
128 		} pirq;
129 		struct xenbus_device *interdomain;
130 	} u;
131 };
132 
133 #define PIRQ_NEEDS_EOI	(1 << 0)
134 #define PIRQ_SHAREABLE	(1 << 1)
135 #define PIRQ_MSI_GROUP	(1 << 2)
136 
137 static uint __read_mostly event_loop_timeout = 2;
138 module_param(event_loop_timeout, uint, 0644);
139 
140 static uint __read_mostly event_eoi_delay = 10;
141 module_param(event_eoi_delay, uint, 0644);
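/*
 * With the "xen." MODULE_PARAM_PREFIX defined above, the two knobs are meant
 * to be tuned from the kernel command line, e.g.:
 *
 *	xen.event_loop_timeout=4 xen.event_eoi_delay=20
 *
 * and, given the 0644 permissions, presumably also at runtime through
 * /sys/module/xen/parameters/ (the values here are only examples; the
 * defaults are 2 and 10).
 */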
142 
143 const struct evtchn_ops *evtchn_ops;
144 
145 /*
146  * This lock protects updates to the following mapping and reference-count
147  * arrays. The lock does not need to be acquired to read the mapping tables.
148  */
149 static DEFINE_MUTEX(irq_mapping_update_lock);
150 
151 /*
152  * Lock hierarchy:
153  *
154  * irq_mapping_update_lock
155  *   IRQ-desc lock
156  *     percpu eoi_list_lock
157  *       irq_info->lock
158  */
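/*
 * A minimal sketch of that ordering for a hypothetical path needing both the
 * outermost and the innermost lock:
 *
 *	mutex_lock(&irq_mapping_update_lock);
 *	raw_spin_lock_irqsave(&info->lock, flags);
 *	...
 *	raw_spin_unlock_irqrestore(&info->lock, flags);
 *	mutex_unlock(&irq_mapping_update_lock);
 */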
159 
160 static LIST_HEAD(xen_irq_list_head);
161 
162 /* IRQ <-> VIRQ mapping. */
163 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
164 
165 /* IRQ <-> IPI mapping */
166 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
167 /* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
168 static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
169 
170 /* Event channel distribution data */
171 static atomic_t channels_on_cpu[NR_CPUS];
172 
173 static int **evtchn_to_irq;
174 #ifdef CONFIG_X86
175 static unsigned long *pirq_eoi_map;
176 #endif
177 static bool (*pirq_needs_eoi)(unsigned irq);
178 
179 #define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
180 #define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
181 #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
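/*
 * Worked example, assuming 4 KiB pages and 4-byte ints: EVTCHN_PER_ROW is
 * 4096 / 4 = 1024, so event channel port 1500 lands in row 1500 / 1024 = 1,
 * column 1500 % 1024 = 476 of the two-level evtchn_to_irq table.
 */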
182 
183 /* Xen will never allocate port zero for any purpose. */
184 #define VALID_EVTCHN(chn)	((chn) != 0)
185 
186 static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
187 
188 static struct irq_chip xen_dynamic_chip;
189 static struct irq_chip xen_lateeoi_chip;
190 static struct irq_chip xen_percpu_chip;
191 static struct irq_chip xen_pirq_chip;
192 static void enable_dynirq(struct irq_data *data);
193 static void disable_dynirq(struct irq_data *data);
194 
195 static DEFINE_PER_CPU(unsigned int, irq_epoch);
196 
197 static void clear_evtchn_to_irq_row(int *evtchn_row)
198 {
199 	unsigned col;
200 
201 	for (col = 0; col < EVTCHN_PER_ROW; col++)
202 		WRITE_ONCE(evtchn_row[col], -1);
203 }
204 
205 static void clear_evtchn_to_irq_all(void)
206 {
207 	unsigned row;
208 
209 	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
210 		if (evtchn_to_irq[row] == NULL)
211 			continue;
212 		clear_evtchn_to_irq_row(evtchn_to_irq[row]);
213 	}
214 }
215 
216 static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
217 {
218 	unsigned row;
219 	unsigned col;
220 	int *evtchn_row;
221 
222 	if (evtchn >= xen_evtchn_max_channels())
223 		return -EINVAL;
224 
225 	row = EVTCHN_ROW(evtchn);
226 	col = EVTCHN_COL(evtchn);
227 
228 	if (evtchn_to_irq[row] == NULL) {
229 		/* Unallocated irq entries return -1 anyway */
230 		if (irq == -1)
231 			return 0;
232 
233 		evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
234 		if (evtchn_row == NULL)
235 			return -ENOMEM;
236 
237 		clear_evtchn_to_irq_row(evtchn_row);
238 
239 		/*
240 		 * We've prepared an empty row for the mapping. If a different
241 		 * thread was faster inserting it, we can drop ours.
242 		 */
243 		if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
244 			free_page((unsigned long) evtchn_row);
245 	}
246 
247 	WRITE_ONCE(evtchn_to_irq[row][col], irq);
248 	return 0;
249 }
250 
251 /* Get info for IRQ */
252 static struct irq_info *info_for_irq(unsigned irq)
253 {
254 	if (irq < nr_legacy_irqs())
255 		return legacy_info_ptrs[irq];
256 	else
257 		return irq_get_chip_data(irq);
258 }
259 
260 static void set_info_for_irq(unsigned int irq, struct irq_info *info)
261 {
262 	if (irq < nr_legacy_irqs())
263 		legacy_info_ptrs[irq] = info;
264 	else
265 		irq_set_chip_data(irq, info);
266 }
267 
268 static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
269 {
270 	int irq;
271 
272 	if (evtchn >= xen_evtchn_max_channels())
273 		return NULL;
274 	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
275 		return NULL;
276 	irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
277 
278 	return (irq < 0) ? NULL : info_for_irq(irq);
279 }
280 
281 /* Per CPU channel accounting */
282 static void channels_on_cpu_dec(struct irq_info *info)
283 {
284 	if (!info->is_accounted)
285 		return;
286 
287 	info->is_accounted = 0;
288 
289 	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
290 		return;
291 
292 	WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1 , 0));
293 }
294 
295 static void channels_on_cpu_inc(struct irq_info *info)
296 {
297 	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
298 		return;
299 
300 	if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
301 					    INT_MAX)))
302 		return;
303 
304 	info->is_accounted = 1;
305 }
306 
307 static void xen_irq_free_desc(unsigned int irq)
308 {
309 	/* Legacy IRQ descriptors are managed by the arch. */
310 	if (irq >= nr_legacy_irqs())
311 		irq_free_desc(irq);
312 }
313 
314 static void delayed_free_irq(struct work_struct *work)
315 {
316 	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
317 					     rwork);
318 	unsigned int irq = info->irq;
319 
320 	/* Remove the info pointer only now, with no potential users left. */
321 	set_info_for_irq(irq, NULL);
322 
323 	kfree(info);
324 
325 	xen_irq_free_desc(irq);
326 }
327 
328 /* Constructors for packed IRQ information. */
329 static int xen_irq_info_common_setup(struct irq_info *info,
330 				     enum xen_irq_type type,
331 				     evtchn_port_t evtchn,
332 				     unsigned short cpu)
333 {
334 	int ret;
335 
336 	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
337 
338 	info->type = type;
339 	info->evtchn = evtchn;
340 	info->cpu = cpu;
341 	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
342 	raw_spin_lock_init(&info->lock);
343 
344 	ret = set_evtchn_to_irq(evtchn, info->irq);
345 	if (ret < 0)
346 		return ret;
347 
348 	irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
349 
350 	return xen_evtchn_port_setup(evtchn);
351 }
352 
353 static int xen_irq_info_evtchn_setup(struct irq_info *info,
354 				     evtchn_port_t evtchn,
355 				     struct xenbus_device *dev)
356 {
357 	int ret;
358 
359 	ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
360 	info->u.interdomain = dev;
361 	if (dev)
362 		atomic_inc(&dev->event_channels);
363 
364 	return ret;
365 }
366 
367 static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
368 				  evtchn_port_t evtchn, enum ipi_vector ipi)
369 {
370 	info->u.ipi = ipi;
371 
372 	per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
373 	per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
374 
375 	return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
376 }
377 
378 static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
379 				   evtchn_port_t evtchn, unsigned int virq)
380 {
381 	info->u.virq = virq;
382 
383 	per_cpu(virq_to_irq, cpu)[virq] = info->irq;
384 
385 	return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
386 }
387 
388 static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
389 				   unsigned int pirq, unsigned int gsi,
390 				   uint16_t domid, unsigned char flags)
391 {
392 	info->u.pirq.pirq = pirq;
393 	info->u.pirq.gsi = gsi;
394 	info->u.pirq.domid = domid;
395 	info->u.pirq.flags = flags;
396 
397 	return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
398 }
399 
400 static void xen_irq_info_cleanup(struct irq_info *info)
401 {
402 	set_evtchn_to_irq(info->evtchn, -1);
403 	xen_evtchn_port_remove(info->evtchn, info->cpu);
404 	info->evtchn = 0;
405 	channels_on_cpu_dec(info);
406 }
407 
408 /*
409  * Accessors for packed IRQ information.
410  */
411 static evtchn_port_t evtchn_from_irq(unsigned int irq)
412 {
413 	const struct irq_info *info = NULL;
414 
415 	if (likely(irq < nr_irqs))
416 		info = info_for_irq(irq);
417 	if (!info)
418 		return 0;
419 
420 	return info->evtchn;
421 }
422 
423 unsigned int irq_from_evtchn(evtchn_port_t evtchn)
424 {
425 	struct irq_info *info = evtchn_to_info(evtchn);
426 
427 	return info ? info->irq : -1;
428 }
429 EXPORT_SYMBOL_GPL(irq_from_evtchn);
430 
431 int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
432 			 evtchn_port_t *evtchn)
433 {
434 	int irq = per_cpu(virq_to_irq, cpu)[virq];
435 
436 	*evtchn = evtchn_from_irq(irq);
437 
438 	return irq;
439 }
440 
441 static enum ipi_vector ipi_from_irq(struct irq_info *info)
442 {
443 	BUG_ON(info == NULL);
444 	BUG_ON(info->type != IRQT_IPI);
445 
446 	return info->u.ipi;
447 }
448 
449 static unsigned int virq_from_irq(struct irq_info *info)
450 {
451 	BUG_ON(info == NULL);
452 	BUG_ON(info->type != IRQT_VIRQ);
453 
454 	return info->u.virq;
455 }
456 
457 static unsigned pirq_from_irq(unsigned irq)
458 {
459 	struct irq_info *info = info_for_irq(irq);
460 
461 	BUG_ON(info == NULL);
462 	BUG_ON(info->type != IRQT_PIRQ);
463 
464 	return info->u.pirq.pirq;
465 }
466 
467 unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
468 {
469 	struct irq_info *info = evtchn_to_info(evtchn);
470 
471 	return info ? info->cpu : 0;
472 }
473 
474 static void do_mask(struct irq_info *info, u8 reason)
475 {
476 	unsigned long flags;
477 
478 	raw_spin_lock_irqsave(&info->lock, flags);
479 
480 	if (!info->mask_reason)
481 		mask_evtchn(info->evtchn);
482 
483 	info->mask_reason |= reason;
484 
485 	raw_spin_unlock_irqrestore(&info->lock, flags);
486 }
487 
488 static void do_unmask(struct irq_info *info, u8 reason)
489 {
490 	unsigned long flags;
491 
492 	raw_spin_lock_irqsave(&info->lock, flags);
493 
494 	info->mask_reason &= ~reason;
495 
496 	if (!info->mask_reason)
497 		unmask_evtchn(info->evtchn);
498 
499 	raw_spin_unlock_irqrestore(&info->lock, flags);
500 }
501 
502 #ifdef CONFIG_X86
503 static bool pirq_check_eoi_map(unsigned irq)
504 {
505 	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
506 }
507 #endif
508 
509 static bool pirq_needs_eoi_flag(unsigned irq)
510 {
511 	struct irq_info *info = info_for_irq(irq);
512 	BUG_ON(info->type != IRQT_PIRQ);
513 
514 	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
515 }
516 
517 static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
518 			       bool force_affinity)
519 {
520 	if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
521 		struct irq_data *data = irq_get_irq_data(info->irq);
522 
523 		irq_data_update_affinity(data, cpumask_of(cpu));
524 		irq_data_update_effective_affinity(data, cpumask_of(cpu));
525 	}
526 
527 	xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
528 
529 	channels_on_cpu_dec(info);
530 	info->cpu = cpu;
531 	channels_on_cpu_inc(info);
532 }
533 
534 /**
535  * notify_remote_via_irq - send event to remote end of event channel via irq
536  * @irq: irq of event channel to send event to
537  *
538  * Unlike notify_remote_via_evtchn(), this is safe to use across
539  * save/restore. Notifications on a broken connection are silently
540  * dropped.
541  */
542 void notify_remote_via_irq(int irq)
543 {
544 	evtchn_port_t evtchn = evtchn_from_irq(irq);
545 
546 	if (VALID_EVTCHN(evtchn))
547 		notify_remote_via_evtchn(evtchn);
548 }
549 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
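/*
 * Producer-side sketch (the ring structure is hypothetical): after queueing
 * requests on a shared ring, a front-end typically kicks the other end with
 *
 *	notify_remote_via_irq(ring->irq);
 *
 * which silently does nothing while the channel is torn down (evtchn == 0).
 */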
550 
551 struct lateeoi_work {
552 	struct delayed_work delayed;
553 	spinlock_t eoi_list_lock;
554 	struct list_head eoi_list;
555 };
556 
557 static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
558 
559 static void lateeoi_list_del(struct irq_info *info)
560 {
561 	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
562 	unsigned long flags;
563 
564 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
565 	list_del_init(&info->eoi_list);
566 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
567 }
568 
569 static void lateeoi_list_add(struct irq_info *info)
570 {
571 	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
572 	struct irq_info *elem;
573 	u64 now = get_jiffies_64();
574 	unsigned long delay;
575 	unsigned long flags;
576 
577 	if (now < info->eoi_time)
578 		delay = info->eoi_time - now;
579 	else
580 		delay = 1;
581 
582 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
583 
584 	elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
585 					eoi_list);
586 	if (!elem || info->eoi_time < elem->eoi_time) {
587 		list_add(&info->eoi_list, &eoi->eoi_list);
588 		mod_delayed_work_on(info->eoi_cpu, system_wq,
589 				    &eoi->delayed, delay);
590 	} else {
591 		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
592 			if (elem->eoi_time <= info->eoi_time)
593 				break;
594 		}
595 		list_add(&info->eoi_list, &elem->eoi_list);
596 	}
597 
598 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
599 }
600 
601 static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
602 {
603 	evtchn_port_t evtchn;
604 	unsigned int cpu;
605 	unsigned int delay = 0;
606 
607 	evtchn = info->evtchn;
608 	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
609 		return;
610 
611 	if (spurious) {
612 		struct xenbus_device *dev = info->u.interdomain;
613 		unsigned int threshold = 1;
614 
615 		if (dev && dev->spurious_threshold)
616 			threshold = dev->spurious_threshold;
617 
618 		if ((1 << info->spurious_cnt) < (HZ << 2)) {
619 			if (info->spurious_cnt != 0xFF)
620 				info->spurious_cnt++;
621 		}
622 		if (info->spurious_cnt > threshold) {
623 			delay = 1 << (info->spurious_cnt - 1 - threshold);
624 			if (delay > HZ)
625 				delay = HZ;
626 			if (!info->eoi_time)
627 				info->eoi_cpu = smp_processor_id();
628 			info->eoi_time = get_jiffies_64() + delay;
629 			if (dev)
630 				atomic_add(delay, &dev->jiffies_eoi_delayed);
631 		}
632 		if (dev)
633 			atomic_inc(&dev->spurious_events);
634 	} else {
635 		info->spurious_cnt = 0;
636 	}
637 
638 	cpu = info->eoi_cpu;
639 	if (info->eoi_time &&
640 	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
641 		lateeoi_list_add(info);
642 		return;
643 	}
644 
645 	info->eoi_time = 0;
646 
647 	/* is_active hasn't been reset yet, do it now. */
648 	smp_store_release(&info->is_active, 0);
649 	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
650 }
651 
652 static void xen_irq_lateeoi_worker(struct work_struct *work)
653 {
654 	struct lateeoi_work *eoi;
655 	struct irq_info *info;
656 	u64 now = get_jiffies_64();
657 	unsigned long flags;
658 
659 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
660 
661 	rcu_read_lock();
662 
663 	while (true) {
664 		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
665 
666 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
667 						eoi_list);
668 
669 		if (info == NULL)
670 			break;
671 
672 		if (now < info->eoi_time) {
673 			mod_delayed_work_on(info->eoi_cpu, system_wq,
674 					    &eoi->delayed,
675 					    info->eoi_time - now);
676 			break;
677 		}
678 
679 		list_del_init(&info->eoi_list);
680 
681 		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
682 
683 		info->eoi_time = 0;
684 
685 		xen_irq_lateeoi_locked(info, false);
686 	}
687 
688 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
689 
690 	rcu_read_unlock();
691 }
692 
693 static void xen_cpu_init_eoi(unsigned int cpu)
694 {
695 	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
696 
697 	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
698 	spin_lock_init(&eoi->eoi_list_lock);
699 	INIT_LIST_HEAD(&eoi->eoi_list);
700 }
701 
702 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
703 {
704 	struct irq_info *info;
705 
706 	rcu_read_lock();
707 
708 	info = info_for_irq(irq);
709 
710 	if (info)
711 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
712 
713 	rcu_read_unlock();
714 }
715 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
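/*
 * A minimal sketch of the intended lateeoi pattern, kept out of the build;
 * it assumes the channel was bound with bind_evtchn_to_irqhandler_lateeoi()
 * and the example_* helpers are hypothetical:
 */
#if 0
static irqreturn_t example_lateeoi_handler(int irq, void *dev_id)
{
	unsigned int eoi_flags = 0;

	/* Report events that carry no work as spurious. */
	if (!example_work_pending(dev_id))
		eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
	else
		example_process_work(dev_id);

	/* The channel stays masked until this explicit EOI. */
	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}
#endif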
716 
717 static struct irq_info *xen_irq_init(unsigned int irq)
718 {
719 	struct irq_info *info;
720 
721 	info = kzalloc(sizeof(*info), GFP_KERNEL);
722 	if (info) {
723 		info->irq = irq;
724 		info->type = IRQT_UNBOUND;
725 		info->refcnt = -1;
726 		INIT_RCU_WORK(&info->rwork, delayed_free_irq);
727 
728 		set_info_for_irq(irq, info);
729 		/*
730 		 * Interrupt affinity setting can be immediate. No point
731 		 * in delaying it until an interrupt is handled.
732 		 */
733 		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
734 
735 		INIT_LIST_HEAD(&info->eoi_list);
736 		list_add_tail(&info->list, &xen_irq_list_head);
737 	}
738 
739 	return info;
740 }
741 
742 static struct irq_info *xen_allocate_irq_dynamic(void)
743 {
744 	int irq = irq_alloc_desc_from(0, -1);
745 	struct irq_info *info = NULL;
746 
747 	if (irq >= 0) {
748 		info = xen_irq_init(irq);
749 		if (!info)
750 			xen_irq_free_desc(irq);
751 	}
752 
753 	return info;
754 }
755 
756 static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
757 {
758 	int irq;
759 	struct irq_info *info;
760 
761 	/*
762 	 * A PV guest has no concept of a GSI (since it has no ACPI
763 	 * nor access to/knowledge of the physical APICs). Therefore
764 	 * all IRQs are dynamically allocated from the entire IRQ
765 	 * space.
766 	 */
767 	if (xen_pv_domain() && !xen_initial_domain())
768 		return xen_allocate_irq_dynamic();
769 
770 	/* Legacy IRQ descriptors are already allocated by the arch. */
771 	if (gsi < nr_legacy_irqs())
772 		irq = gsi;
773 	else
774 		irq = irq_alloc_desc_at(gsi, -1);
775 
776 	info = xen_irq_init(irq);
777 	if (!info)
778 		xen_irq_free_desc(irq);
779 
780 	return info;
781 }
782 
783 static void xen_free_irq(struct irq_info *info)
784 {
785 	if (WARN_ON(!info))
786 		return;
787 
788 	if (!list_empty(&info->eoi_list))
789 		lateeoi_list_del(info);
790 
791 	list_del(&info->list);
792 
793 	WARN_ON(info->refcnt > 0);
794 
795 	queue_rcu_work(system_wq, &info->rwork);
796 }
797 
798 /* Not called for lateeoi events. */
799 static void event_handler_exit(struct irq_info *info)
800 {
801 	smp_store_release(&info->is_active, 0);
802 	clear_evtchn(info->evtchn);
803 }
804 
805 static void pirq_query_unmask(int irq)
806 {
807 	struct physdev_irq_status_query irq_status;
808 	struct irq_info *info = info_for_irq(irq);
809 
810 	BUG_ON(info->type != IRQT_PIRQ);
811 
812 	irq_status.irq = pirq_from_irq(irq);
813 	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
814 		irq_status.flags = 0;
815 
816 	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
817 	if (irq_status.flags & XENIRQSTAT_needs_eoi)
818 		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
819 }
820 
821 static void eoi_pirq(struct irq_data *data)
822 {
823 	struct irq_info *info = info_for_irq(data->irq);
824 	evtchn_port_t evtchn = info ? info->evtchn : 0;
825 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
826 	int rc = 0;
827 
828 	if (!VALID_EVTCHN(evtchn))
829 		return;
830 
831 	event_handler_exit(info);
832 
833 	if (pirq_needs_eoi(data->irq)) {
834 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
835 		WARN_ON(rc);
836 	}
837 }
838 
839 static void mask_ack_pirq(struct irq_data *data)
840 {
841 	disable_dynirq(data);
842 	eoi_pirq(data);
843 }
844 
845 static unsigned int __startup_pirq(unsigned int irq)
846 {
847 	struct evtchn_bind_pirq bind_pirq;
848 	struct irq_info *info = info_for_irq(irq);
849 	evtchn_port_t evtchn = evtchn_from_irq(irq);
850 	int rc;
851 
852 	BUG_ON(info->type != IRQT_PIRQ);
853 
854 	if (VALID_EVTCHN(evtchn))
855 		goto out;
856 
857 	bind_pirq.pirq = pirq_from_irq(irq);
858 	/* NB. We are happy to share unless we are probing. */
859 	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
860 					BIND_PIRQ__WILL_SHARE : 0;
861 	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
862 	if (rc != 0) {
863 		pr_warn("Failed to obtain physical IRQ %d\n", irq);
864 		return 0;
865 	}
866 	evtchn = bind_pirq.port;
867 
868 	pirq_query_unmask(irq);
869 
870 	rc = set_evtchn_to_irq(evtchn, irq);
871 	if (rc)
872 		goto err;
873 
874 	info->evtchn = evtchn;
875 	bind_evtchn_to_cpu(info, 0, false);
876 
877 	rc = xen_evtchn_port_setup(evtchn);
878 	if (rc)
879 		goto err;
880 
881 out:
882 	do_unmask(info, EVT_MASK_REASON_EXPLICIT);
883 
884 	eoi_pirq(irq_get_irq_data(irq));
885 
886 	return 0;
887 
888 err:
889 	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
890 	xen_evtchn_close(evtchn);
891 	return 0;
892 }
893 
894 static unsigned int startup_pirq(struct irq_data *data)
895 {
896 	return __startup_pirq(data->irq);
897 }
898 
899 static void shutdown_pirq(struct irq_data *data)
900 {
901 	unsigned int irq = data->irq;
902 	struct irq_info *info = info_for_irq(irq);
903 	evtchn_port_t evtchn = evtchn_from_irq(irq);
904 
905 	BUG_ON(info->type != IRQT_PIRQ);
906 
907 	if (!VALID_EVTCHN(evtchn))
908 		return;
909 
910 	do_mask(info, EVT_MASK_REASON_EXPLICIT);
911 	xen_irq_info_cleanup(info);
912 	xen_evtchn_close(evtchn);
913 }
914 
915 static void enable_pirq(struct irq_data *data)
916 {
917 	enable_dynirq(data);
918 }
919 
920 static void disable_pirq(struct irq_data *data)
921 {
922 	disable_dynirq(data);
923 }
924 
925 int xen_irq_from_gsi(unsigned gsi)
926 {
927 	struct irq_info *info;
928 
929 	list_for_each_entry(info, &xen_irq_list_head, list) {
930 		if (info->type != IRQT_PIRQ)
931 			continue;
932 
933 		if (info->u.pirq.gsi == gsi)
934 			return info->irq;
935 	}
936 
937 	return -1;
938 }
939 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
940 
941 static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
942 {
943 	evtchn_port_t evtchn;
944 	bool close_evtchn = false;
945 
946 	if (!info) {
947 		xen_irq_free_desc(irq);
948 		return;
949 	}
950 
951 	if (info->refcnt > 0) {
952 		info->refcnt--;
953 		if (info->refcnt != 0)
954 			return;
955 	}
956 
957 	evtchn = info->evtchn;
958 
959 	if (VALID_EVTCHN(evtchn)) {
960 		unsigned int cpu = info->cpu;
961 		struct xenbus_device *dev;
962 
963 		if (!info->is_static)
964 			close_evtchn = true;
965 
966 		switch (info->type) {
967 		case IRQT_VIRQ:
968 			per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
969 			break;
970 		case IRQT_IPI:
971 			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
972 			per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
973 			break;
974 		case IRQT_EVTCHN:
975 			dev = info->u.interdomain;
976 			if (dev)
977 				atomic_dec(&dev->event_channels);
978 			break;
979 		default:
980 			break;
981 		}
982 
983 		xen_irq_info_cleanup(info);
984 
985 		if (close_evtchn)
986 			xen_evtchn_close(evtchn);
987 	}
988 
989 	xen_free_irq(info);
990 }
991 
992 /*
993  * Do not make any assumptions regarding the relationship between the
994  * IRQ number returned here and the Xen pirq argument.
995  *
996  * Note: We don't assign an event channel until the irq is actually started
997  * up.  Return an existing irq if we've already got one for the gsi.
998  *
999  * Shareable implies level triggered, not shareable implies edge
1000  * triggered here.
1001  */
1002 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
1003 			     unsigned pirq, int shareable, char *name)
1004 {
1005 	struct irq_info *info;
1006 	struct physdev_irq irq_op;
1007 	int ret;
1008 
1009 	mutex_lock(&irq_mapping_update_lock);
1010 
1011 	ret = xen_irq_from_gsi(gsi);
1012 	if (ret != -1) {
1013 		pr_info("%s: returning irq %d for gsi %u\n",
1014 			__func__, ret, gsi);
1015 		goto out;
1016 	}
1017 
1018 	info = xen_allocate_irq_gsi(gsi);
1019 	if (!info)
1020 		goto out;
1021 
1022 	irq_op.irq = info->irq;
1023 	irq_op.vector = 0;
1024 
1025 	/* Only the privileged domain can do this. For non-priv, the pcifront
1026 	 * driver provides a PCI bus that does the call to do exactly
1027 	 * this in the priv domain. */
1028 	if (xen_initial_domain() &&
1029 	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
1030 		xen_free_irq(info);
1031 		ret = -ENOSPC;
1032 		goto out;
1033 	}
1034 
1035 	ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
1036 			       shareable ? PIRQ_SHAREABLE : 0);
1037 	if (ret < 0) {
1038 		__unbind_from_irq(info, info->irq);
1039 		goto out;
1040 	}
1041 
1042 	pirq_query_unmask(info->irq);
1043 	/* We try to use the handler with the appropriate semantic for the
1044 	 * type of interrupt: if the interrupt is an edge triggered
1045 	 * interrupt we use handle_edge_irq.
1046 	 *
1047 	 * On the other hand if the interrupt is level triggered we use
1048 	 * handle_fasteoi_irq like the native code does for this kind of
1049 	 * interrupts.
1050 	 *
1051 	 * Depending on the Xen version, pirq_needs_eoi might return true
1052 	 * not only for level triggered interrupts but for edge triggered
1053 	 * interrupts too. In any case Xen always honors the eoi mechanism,
1054 	 * not injecting any more pirqs of the same kind if the first one
1055 	 * hasn't received an eoi yet. Therefore using the fasteoi handler
1056 	 * is the right choice either way.
1057 	 */
1058 	if (shareable)
1059 		irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1060 				handle_fasteoi_irq, name);
1061 	else
1062 		irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1063 				handle_edge_irq, name);
1064 
1065 	ret = info->irq;
1066 
1067 out:
1068 	mutex_unlock(&irq_mapping_update_lock);
1069 
1070 	return ret;
1071 }
1072 
1073 #ifdef CONFIG_PCI_MSI
1074 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
1075 {
1076 	int rc;
1077 	struct physdev_get_free_pirq op_get_free_pirq;
1078 
1079 	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
1080 	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
1081 
1082 	WARN_ONCE(rc == -ENOSYS,
1083 		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
1084 
1085 	return rc ? -1 : op_get_free_pirq.pirq;
1086 }
1087 
1088 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
1089 			     int pirq, int nvec, const char *name, domid_t domid)
1090 {
1091 	int i, irq, ret;
1092 	struct irq_info *info;
1093 
1094 	mutex_lock(&irq_mapping_update_lock);
1095 
1096 	irq = irq_alloc_descs(-1, 0, nvec, -1);
1097 	if (irq < 0)
1098 		goto out;
1099 
1100 	for (i = 0; i < nvec; i++) {
1101 		info = xen_irq_init(irq + i);
1102 		if (!info)
1103 			goto error_irq;
1104 
1105 		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
1106 
1107 		ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
1108 					      i == 0 ? 0 : PIRQ_MSI_GROUP);
1109 		if (ret < 0)
1110 			goto error_irq;
1111 	}
1112 
1113 	ret = irq_set_msi_desc(irq, msidesc);
1114 	if (ret < 0)
1115 		goto error_irq;
1116 out:
1117 	mutex_unlock(&irq_mapping_update_lock);
1118 	return irq;
1119 
1120 error_irq:
1121 	while (nvec--) {
1122 		info = info_for_irq(irq + nvec);
1123 		__unbind_from_irq(info, irq + nvec);
1124 	}
1125 	mutex_unlock(&irq_mapping_update_lock);
1126 	return ret;
1127 }
1128 #endif
1129 
1130 int xen_destroy_irq(int irq)
1131 {
1132 	struct physdev_unmap_pirq unmap_irq;
1133 	struct irq_info *info = info_for_irq(irq);
1134 	int rc = -ENOENT;
1135 
1136 	mutex_lock(&irq_mapping_update_lock);
1137 
1138 	/*
1139 	 * Only the first vector of an MSI group unmaps the PIRQ: the other
1140 	 * vectors carry the PIRQ_MSI_GROUP flag, so skip the unmap for
1141 	 * them.
1142 	 */
1143 	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
1144 		unmap_irq.pirq = info->u.pirq.pirq;
1145 		unmap_irq.domid = info->u.pirq.domid;
1146 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
1147 		/* If another domain quits without making the pci_disable_msix
1148 		 * call, the Xen hypervisor takes care of freeing the PIRQs
1149 		 * (free_domain_pirqs).
1150 		 */
1151 		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
1152 			pr_info("domain %d does not have %d anymore\n",
1153 				info->u.pirq.domid, info->u.pirq.pirq);
1154 		else if (rc) {
1155 			pr_warn("unmap irq failed %d\n", rc);
1156 			goto out;
1157 		}
1158 	}
1159 
1160 	xen_free_irq(info);
1161 
1162 out:
1163 	mutex_unlock(&irq_mapping_update_lock);
1164 	return rc;
1165 }
1166 
1167 int xen_irq_from_pirq(unsigned pirq)
1168 {
1169 	int irq;
1170 
1171 	struct irq_info *info;
1172 
1173 	mutex_lock(&irq_mapping_update_lock);
1174 
1175 	list_for_each_entry(info, &xen_irq_list_head, list) {
1176 		if (info->type != IRQT_PIRQ)
1177 			continue;
1178 		irq = info->irq;
1179 		if (info->u.pirq.pirq == pirq)
1180 			goto out;
1181 	}
1182 	irq = -1;
1183 out:
1184 	mutex_unlock(&irq_mapping_update_lock);
1185 
1186 	return irq;
1187 }
1188 
1189 
1190 int xen_pirq_from_irq(unsigned irq)
1191 {
1192 	return pirq_from_irq(irq);
1193 }
1194 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
1195 
1196 static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
1197 				   struct xenbus_device *dev)
1198 {
1199 	int ret = -ENOMEM;
1200 	struct irq_info *info;
1201 
1202 	if (evtchn >= xen_evtchn_max_channels())
1203 		return -ENOMEM;
1204 
1205 	mutex_lock(&irq_mapping_update_lock);
1206 
1207 	info = evtchn_to_info(evtchn);
1208 
1209 	if (!info) {
1210 		info = xen_allocate_irq_dynamic();
1211 		if (!info)
1212 			goto out;
1213 
1214 		irq_set_chip_and_handler_name(info->irq, chip,
1215 					      handle_edge_irq, "event");
1216 
1217 		ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
1218 		if (ret < 0) {
1219 			__unbind_from_irq(info, info->irq);
1220 			goto out;
1221 		}
1222 		/*
1223 		 * New interdomain events are initially bound to vCPU0. This
1224 		 * is required to set up the event channel in the first
1225 		 * place and is also important for UP guests, because the
1226 		 * affinity setting is not invoked on them, so nothing else
1227 		 * would bind the channel.
1228 		 */
1229 		bind_evtchn_to_cpu(info, 0, false);
1230 	} else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
1231 		info->refcnt++;
1232 	}
1233 
1234 	ret = info->irq;
1235 
1236 out:
1237 	mutex_unlock(&irq_mapping_update_lock);
1238 
1239 	return ret;
1240 }
1241 
1242 int bind_evtchn_to_irq(evtchn_port_t evtchn)
1243 {
1244 	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
1245 }
1246 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
1247 
1248 int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
1249 {
1250 	return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
1251 }
1252 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
1253 
1254 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
1255 {
1256 	struct evtchn_bind_ipi bind_ipi;
1257 	evtchn_port_t evtchn;
1258 	struct irq_info *info;
1259 	int ret;
1260 
1261 	mutex_lock(&irq_mapping_update_lock);
1262 
1263 	ret = per_cpu(ipi_to_irq, cpu)[ipi];
1264 
1265 	if (ret == -1) {
1266 		info = xen_allocate_irq_dynamic();
1267 		if (!info)
1268 			goto out;
1269 
1270 		irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1271 					      handle_percpu_irq, "ipi");
1272 
1273 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
1274 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1275 						&bind_ipi) != 0)
1276 			BUG();
1277 		evtchn = bind_ipi.port;
1278 
1279 		ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
1280 		if (ret < 0) {
1281 			__unbind_from_irq(info, info->irq);
1282 			goto out;
1283 		}
1284 		/*
1285 		 * Force the affinity mask to the target CPU so proc shows
1286 		 * the correct target.
1287 		 */
1288 		bind_evtchn_to_cpu(info, cpu, true);
1289 		ret = info->irq;
1290 	} else {
1291 		info = info_for_irq(ret);
1292 		WARN_ON(info == NULL || info->type != IRQT_IPI);
1293 	}
1294 
1295  out:
1296 	mutex_unlock(&irq_mapping_update_lock);
1297 	return ret;
1298 }
1299 
1300 static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
1301 					       evtchn_port_t remote_port,
1302 					       struct irq_chip *chip)
1303 {
1304 	struct evtchn_bind_interdomain bind_interdomain;
1305 	int err;
1306 
1307 	bind_interdomain.remote_dom  = dev->otherend_id;
1308 	bind_interdomain.remote_port = remote_port;
1309 
1310 	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
1311 					  &bind_interdomain);
1312 
1313 	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
1314 					       chip, dev);
1315 }
1316 
1317 int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
1318 					   evtchn_port_t remote_port)
1319 {
1320 	return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
1321 						   &xen_lateeoi_chip);
1322 }
1323 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
1324 
1325 static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
1326 {
1327 	struct evtchn_status status;
1328 	evtchn_port_t port;
1329 	int rc = -ENOENT;
1330 
1331 	memset(&status, 0, sizeof(status));
1332 	for (port = 0; port < xen_evtchn_max_channels(); port++) {
1333 		status.dom = DOMID_SELF;
1334 		status.port = port;
1335 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
1336 		if (rc < 0)
1337 			continue;
1338 		if (status.status != EVTCHNSTAT_virq)
1339 			continue;
1340 		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
1341 			*evtchn = port;
1342 			break;
1343 		}
1344 	}
1345 	return rc;
1346 }
1347 
1348 /**
1349  * xen_evtchn_nr_channels - number of usable event channel ports
1350  *
1351  * This may be less than the maximum supported by the current
1352  * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
1353  * supported.
1354  */
1355 unsigned xen_evtchn_nr_channels(void)
1356 {
1357 	return evtchn_ops->nr_channels();
1358 }
1359 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
1360 
1361 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
1362 {
1363 	struct evtchn_bind_virq bind_virq;
1364 	evtchn_port_t evtchn = 0;
1365 	struct irq_info *info;
1366 	int ret;
1367 
1368 	mutex_lock(&irq_mapping_update_lock);
1369 
1370 	ret = per_cpu(virq_to_irq, cpu)[virq];
1371 
1372 	if (ret == -1) {
1373 		info = xen_allocate_irq_dynamic();
1374 		if (!info)
1375 			goto out;
1376 
1377 		if (percpu)
1378 			irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1379 						      handle_percpu_irq, "virq");
1380 		else
1381 			irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
1382 						      handle_edge_irq, "virq");
1383 
1384 		bind_virq.virq = virq;
1385 		bind_virq.vcpu = xen_vcpu_nr(cpu);
1386 		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1387 						&bind_virq);
1388 		if (ret == 0)
1389 			evtchn = bind_virq.port;
1390 		else {
1391 			if (ret == -EEXIST)
1392 				ret = find_virq(virq, cpu, &evtchn);
1393 			BUG_ON(ret < 0);
1394 		}
1395 
1396 		ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1397 		if (ret < 0) {
1398 			__unbind_from_irq(info, info->irq);
1399 			goto out;
1400 		}
1401 
1402 		/*
1403 		 * Force the affinity mask for percpu interrupts so proc
1404 		 * shows the correct target.
1405 		 */
1406 		bind_evtchn_to_cpu(info, cpu, percpu);
1407 		ret = info->irq;
1408 	} else {
1409 		info = info_for_irq(ret);
1410 		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
1411 	}
1412 
1413 out:
1414 	mutex_unlock(&irq_mapping_update_lock);
1415 
1416 	return ret;
1417 }
1418 
1419 static void unbind_from_irq(unsigned int irq)
1420 {
1421 	struct irq_info *info;
1422 
1423 	mutex_lock(&irq_mapping_update_lock);
1424 	info = info_for_irq(irq);
1425 	__unbind_from_irq(info, irq);
1426 	mutex_unlock(&irq_mapping_update_lock);
1427 }
1428 
1429 static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
1430 					  irq_handler_t handler,
1431 					  unsigned long irqflags,
1432 					  const char *devname, void *dev_id,
1433 					  struct irq_chip *chip)
1434 {
1435 	int irq, retval;
1436 
1437 	irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
1438 	if (irq < 0)
1439 		return irq;
1440 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1441 	if (retval != 0) {
1442 		unbind_from_irq(irq);
1443 		return retval;
1444 	}
1445 
1446 	return irq;
1447 }
1448 
1449 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
1450 			      irq_handler_t handler,
1451 			      unsigned long irqflags,
1452 			      const char *devname, void *dev_id)
1453 {
1454 	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1455 					      devname, dev_id,
1456 					      &xen_dynamic_chip);
1457 }
1458 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
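/*
 * A minimal consumer sketch, kept out of the build; the example_front
 * structure, handler body and port source are hypothetical, only the calls
 * into this file are real:
 */
#if 0
struct example_front {
	evtchn_port_t evtchn;	/* port granted by the backend */
	int irq;
};

static irqreturn_t example_evtchn_handler(int irq, void *dev_id)
{
	/* Consume ring responses here. */
	return IRQ_HANDLED;
}

static int example_front_connect(struct example_front *front)
{
	int err;

	err = bind_evtchn_to_irqhandler(front->evtchn, example_evtchn_handler,
					0, "example-front", front);
	if (err < 0)
		return err;

	front->irq = err;
	/* Kick the backend once so it notices the new connection. */
	notify_remote_via_irq(front->irq);

	return 0;
}

static void example_front_disconnect(struct example_front *front)
{
	unbind_from_irqhandler(front->irq, front);
	front->irq = -1;
}
#endif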
1459 
1460 int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
1461 				      irq_handler_t handler,
1462 				      unsigned long irqflags,
1463 				      const char *devname, void *dev_id)
1464 {
1465 	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1466 					      devname, dev_id,
1467 					      &xen_lateeoi_chip);
1468 }
1469 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
1470 
1471 static int bind_interdomain_evtchn_to_irqhandler_chip(
1472 		struct xenbus_device *dev, evtchn_port_t remote_port,
1473 		irq_handler_t handler, unsigned long irqflags,
1474 		const char *devname, void *dev_id, struct irq_chip *chip)
1475 {
1476 	int irq, retval;
1477 
1478 	irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
1479 	if (irq < 0)
1480 		return irq;
1481 
1482 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1483 	if (retval != 0) {
1484 		unbind_from_irq(irq);
1485 		return retval;
1486 	}
1487 
1488 	return irq;
1489 }
1490 
1491 int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
1492 						  evtchn_port_t remote_port,
1493 						  irq_handler_t handler,
1494 						  unsigned long irqflags,
1495 						  const char *devname,
1496 						  void *dev_id)
1497 {
1498 	return bind_interdomain_evtchn_to_irqhandler_chip(dev,
1499 				remote_port, handler, irqflags, devname,
1500 				dev_id, &xen_lateeoi_chip);
1501 }
1502 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
1503 
1504 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
1505 			    irq_handler_t handler,
1506 			    unsigned long irqflags, const char *devname, void *dev_id)
1507 {
1508 	int irq, retval;
1509 
1510 	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
1511 	if (irq < 0)
1512 		return irq;
1513 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1514 	if (retval != 0) {
1515 		unbind_from_irq(irq);
1516 		return retval;
1517 	}
1518 
1519 	return irq;
1520 }
1521 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
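/*
 * Sketch of a global-VIRQ consumer, kept out of the build; VIRQ_DEBUG is a
 * real global VIRQ, the handler is hypothetical:
 */
#if 0
static irqreturn_t example_debug_virq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_bind_debug_virq(void)
{
	/* Global VIRQs are bound once, on CPU 0, without IRQF_PERCPU. */
	return bind_virq_to_irqhandler(VIRQ_DEBUG, 0, example_debug_virq, 0,
				       "example-debug", NULL);
}
#endif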
1522 
1523 int bind_ipi_to_irqhandler(enum ipi_vector ipi,
1524 			   unsigned int cpu,
1525 			   irq_handler_t handler,
1526 			   unsigned long irqflags,
1527 			   const char *devname,
1528 			   void *dev_id)
1529 {
1530 	int irq, retval;
1531 
1532 	irq = bind_ipi_to_irq(ipi, cpu);
1533 	if (irq < 0)
1534 		return irq;
1535 
1536 	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
1537 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1538 	if (retval != 0) {
1539 		unbind_from_irq(irq);
1540 		return retval;
1541 	}
1542 
1543 	return irq;
1544 }
1545 
1546 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1547 {
1548 	struct irq_info *info = info_for_irq(irq);
1549 
1550 	if (WARN_ON(!info))
1551 		return;
1552 	free_irq(irq, dev_id);
1553 	unbind_from_irq(irq);
1554 }
1555 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1556 
1557 /**
1558  * xen_set_irq_priority() - set an event channel priority.
1559  * @irq: irq bound to an event channel.
1560  * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
1561  */
1562 int xen_set_irq_priority(unsigned irq, unsigned priority)
1563 {
1564 	struct evtchn_set_priority set_priority;
1565 
1566 	set_priority.port = evtchn_from_irq(irq);
1567 	set_priority.priority = priority;
1568 
1569 	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
1570 					   &set_priority);
1571 }
1572 EXPORT_SYMBOL_GPL(xen_set_irq_priority);
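/*
 * Example, assuming "irq" came from one of the bind_* helpers above:
 *
 *	xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 *
 * The hypercall only has an effect on ABIs that implement
 * EVTCHNOP_set_priority (i.e. the FIFO event channel ABI).
 */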
1573 
1574 int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
1575 {
1576 	struct irq_info *info = evtchn_to_info(evtchn);
1577 
1578 	if (!info)
1579 		return -ENOENT;
1580 
1581 	WARN_ON(info->refcnt != -1);
1582 
1583 	info->refcnt = 1;
1584 	info->is_static = is_static;
1585 
1586 	return 0;
1587 }
1588 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1589 
1590 int evtchn_get(evtchn_port_t evtchn)
1591 {
1592 	struct irq_info *info;
1593 	int err = -ENOENT;
1594 
1595 	if (evtchn >= xen_evtchn_max_channels())
1596 		return -EINVAL;
1597 
1598 	mutex_lock(&irq_mapping_update_lock);
1599 
1600 	info = evtchn_to_info(evtchn);
1601 
1602 	if (!info)
1603 		goto done;
1604 
1605 	err = -EINVAL;
1606 	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
1607 		goto done;
1608 
1609 	info->refcnt++;
1610 	err = 0;
1611  done:
1612 	mutex_unlock(&irq_mapping_update_lock);
1613 
1614 	return err;
1615 }
1616 EXPORT_SYMBOL_GPL(evtchn_get);
1617 
1618 void evtchn_put(evtchn_port_t evtchn)
1619 {
1620 	struct irq_info *info = evtchn_to_info(evtchn);
1621 
1622 	if (WARN_ON(!info))
1623 		return;
1624 	unbind_from_irq(info->irq);
1625 }
1626 EXPORT_SYMBOL_GPL(evtchn_put);
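/*
 * Rough usage sketch (the port variable is hypothetical): a driver that wants
 * to pin an already refcounted port across an operation does
 *
 *	if (evtchn_get(port) == 0) {
 *		...use the port...
 *		evtchn_put(port);
 *	}
 *
 * which only succeeds for ports previously handed to evtchn_make_refcounted().
 */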
1627 
1628 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1629 {
1630 	evtchn_port_t evtchn;
1631 
1632 #ifdef CONFIG_X86
1633 	if (unlikely(vector == XEN_NMI_VECTOR)) {
1634 		int rc =  HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
1635 					     NULL);
1636 		if (rc < 0)
1637 			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1638 		return;
1639 	}
1640 #endif
1641 	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
1642 	BUG_ON(evtchn == 0);
1643 	notify_remote_via_evtchn(evtchn);
1644 }
1645 
1646 struct evtchn_loop_ctrl {
1647 	ktime_t timeout;
1648 	unsigned count;
1649 	bool defer_eoi;
1650 };
1651 
1652 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
1653 {
1654 	struct irq_info *info = evtchn_to_info(port);
1655 	struct xenbus_device *dev;
1656 
1657 	if (!info)
1658 		return;
1659 
1660 	/*
1661 	 * Check for timeout every 256 events.
1662 	 * We are setting the timeout value only after the first 256
1663 	 * events in order to not hurt the common case of few loop
1664 	 * iterations. The 256 is basically an arbitrary value.
1665 	 *
1666 	 * In case we are hitting the timeout we need to defer all further
1667 	 * EOIs in order to ensure to leave the event handling loop rather
1668 	 * sooner than later.
1669 	 */
1670 	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
1671 		ktime_t kt = ktime_get();
1672 
1673 		if (!ctrl->timeout) {
1674 			kt = ktime_add_ms(kt,
1675 					  jiffies_to_msecs(event_loop_timeout));
1676 			ctrl->timeout = kt;
1677 		} else if (kt > ctrl->timeout) {
1678 			ctrl->defer_eoi = true;
1679 		}
1680 	}
1681 
1682 	if (xchg_acquire(&info->is_active, 1))
1683 		return;
1684 
1685 	dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
1686 	if (dev)
1687 		atomic_inc(&dev->events);
1688 
1689 	if (ctrl->defer_eoi) {
1690 		info->eoi_cpu = smp_processor_id();
1691 		info->irq_epoch = __this_cpu_read(irq_epoch);
1692 		info->eoi_time = get_jiffies_64() + event_eoi_delay;
1693 	}
1694 
1695 	generic_handle_irq(info->irq);
1696 }
1697 
1698 int xen_evtchn_do_upcall(void)
1699 {
1700 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
1701 	int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
1702 	int cpu = smp_processor_id();
1703 	struct evtchn_loop_ctrl ctrl = { 0 };
1704 
1705 	/*
1706 	 * When closing an event channel the associated IRQ must not be freed
1707 	 * until all cpus have left the event handling loop. This is ensured
1708 	 * by taking the rcu_read_lock() while handling events, as freeing of
1709 	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
1710 	 * channel.
1711 	 */
1712 	rcu_read_lock();
1713 
1714 	do {
1715 		vcpu_info->evtchn_upcall_pending = 0;
1716 
1717 		xen_evtchn_handle_events(cpu, &ctrl);
1718 
1719 		BUG_ON(!irqs_disabled());
1720 
1721 		virt_rmb(); /* Hypervisor can set upcall pending. */
1722 
1723 	} while (vcpu_info->evtchn_upcall_pending);
1724 
1725 	rcu_read_unlock();
1726 
1727 	/*
1728 	 * Increment irq_epoch only now to defer EOIs only for
1729 	 * xen_irq_lateeoi() invocations occurring from inside the loop
1730 	 * above.
1731 	 */
1732 	__this_cpu_inc(irq_epoch);
1733 
1734 	return ret;
1735 }
1736 EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
1737 
1738 /* Rebind a new event channel to an existing irq. */
1739 void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
1740 {
1741 	struct irq_info *info = info_for_irq(irq);
1742 
1743 	if (WARN_ON(!info))
1744 		return;
1745 
1746 	/* Make sure the irq is masked, since the new event channel
1747 	   will also be masked. */
1748 	disable_irq(irq);
1749 
1750 	mutex_lock(&irq_mapping_update_lock);
1751 
1752 	/* After resume the irq<->evtchn mappings are all cleared out */
1753 	BUG_ON(evtchn_to_info(evtchn));
1754 	/* Expect irq to have been bound before,
1755 	   so there should be a proper type */
1756 	BUG_ON(info->type == IRQT_UNBOUND);
1757 
1758 	info->irq = irq;
1759 	(void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
1760 
1761 	mutex_unlock(&irq_mapping_update_lock);
1762 
1763 	bind_evtchn_to_cpu(info, info->cpu, false);
1764 
1765 	/* Unmask the event channel. */
1766 	enable_irq(irq);
1767 }
1768 
1769 /* Rebind an evtchn so that it gets delivered to a specific cpu */
1770 static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
1771 {
1772 	struct evtchn_bind_vcpu bind_vcpu;
1773 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1774 
1775 	if (!VALID_EVTCHN(evtchn))
1776 		return -1;
1777 
1778 	if (!xen_support_evtchn_rebind())
1779 		return -1;
1780 
1781 	/* Send future instances of this interrupt to the target vcpu. */
1782 	bind_vcpu.port = evtchn;
1783 	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
1784 
1785 	/*
1786 	 * Mask the event while changing the VCPU binding to prevent
1787 	 * it being delivered on an unexpected VCPU.
1788 	 */
1789 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
1790 
1791 	/*
1792 	 * If this fails, it usually just indicates that we're dealing with a
1793 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
1794 	 * it, but don't do the xenlinux-level rebind in that case.
1795 	 */
1796 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1797 		bind_evtchn_to_cpu(info, tcpu, false);
1798 
1799 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1800 
1801 	return 0;
1802 }
1803 
1804 /*
1805  * Find the CPU within @dest mask which has the least number of channels
1806  * assigned. This is not precise as the per cpu counts can be modified
1807  * concurrently.
1808  */
1809 static unsigned int select_target_cpu(const struct cpumask *dest)
1810 {
1811 	unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;
1812 
1813 	for_each_cpu_and(cpu, dest, cpu_online_mask) {
1814 		unsigned int curch = atomic_read(&channels_on_cpu[cpu]);
1815 
1816 		if (curch < minch) {
1817 			minch = curch;
1818 			best_cpu = cpu;
1819 		}
1820 	}
1821 
1822 	/*
1823 	 * Catch the unlikely case that dest contains no online CPUs. Can't
1824 	 * recurse.
1825 	 */
1826 	if (best_cpu == UINT_MAX)
1827 		return select_target_cpu(cpu_online_mask);
1828 
1829 	return best_cpu;
1830 }
1831 
1832 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1833 			    bool force)
1834 {
1835 	unsigned int tcpu = select_target_cpu(dest);
1836 	int ret;
1837 
1838 	ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
1839 	if (!ret)
1840 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
1841 
1842 	return ret;
1843 }
1844 
1845 static void enable_dynirq(struct irq_data *data)
1846 {
1847 	struct irq_info *info = info_for_irq(data->irq);
1848 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1849 
1850 	if (VALID_EVTCHN(evtchn))
1851 		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
1852 }
1853 
1854 static void disable_dynirq(struct irq_data *data)
1855 {
1856 	struct irq_info *info = info_for_irq(data->irq);
1857 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1858 
1859 	if (VALID_EVTCHN(evtchn))
1860 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
1861 }
1862 
1863 static void ack_dynirq(struct irq_data *data)
1864 {
1865 	struct irq_info *info = info_for_irq(data->irq);
1866 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1867 
1868 	if (VALID_EVTCHN(evtchn))
1869 		event_handler_exit(info);
1870 }
1871 
1872 static void mask_ack_dynirq(struct irq_data *data)
1873 {
1874 	disable_dynirq(data);
1875 	ack_dynirq(data);
1876 }
1877 
1878 static void lateeoi_ack_dynirq(struct irq_data *data)
1879 {
1880 	struct irq_info *info = info_for_irq(data->irq);
1881 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1882 
1883 	if (VALID_EVTCHN(evtchn)) {
1884 		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
1885 		/*
1886 		 * Don't call event_handler_exit().
1887 		 * Need to keep is_active non-zero in order to ignore re-raised
1888 		 * events after cpu affinity changes while a lateeoi is pending.
1889 		 */
1890 		clear_evtchn(evtchn);
1891 	}
1892 }
1893 
1894 static void lateeoi_mask_ack_dynirq(struct irq_data *data)
1895 {
1896 	struct irq_info *info = info_for_irq(data->irq);
1897 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1898 
1899 	if (VALID_EVTCHN(evtchn)) {
1900 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
1901 		event_handler_exit(info);
1902 	}
1903 }
1904 
1905 static int retrigger_dynirq(struct irq_data *data)
1906 {
1907 	struct irq_info *info = info_for_irq(data->irq);
1908 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1909 
1910 	if (!VALID_EVTCHN(evtchn))
1911 		return 0;
1912 
1913 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
1914 	set_evtchn(evtchn);
1915 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1916 
1917 	return 1;
1918 }
1919 
1920 static void restore_pirqs(void)
1921 {
1922 	int pirq, rc, irq, gsi;
1923 	struct physdev_map_pirq map_irq;
1924 	struct irq_info *info;
1925 
1926 	list_for_each_entry(info, &xen_irq_list_head, list) {
1927 		if (info->type != IRQT_PIRQ)
1928 			continue;
1929 
1930 		pirq = info->u.pirq.pirq;
1931 		gsi = info->u.pirq.gsi;
1932 		irq = info->irq;
1933 
1934 		/* save/restore of PT devices doesn't work, so at this point the
1935 		 * only devices present are GSI based emulated devices */
1936 		if (!gsi)
1937 			continue;
1938 
1939 		map_irq.domid = DOMID_SELF;
1940 		map_irq.type = MAP_PIRQ_TYPE_GSI;
1941 		map_irq.index = gsi;
1942 		map_irq.pirq = pirq;
1943 
1944 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1945 		if (rc) {
1946 			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1947 				gsi, irq, pirq, rc);
1948 			xen_free_irq(info);
1949 			continue;
1950 		}
1951 
1952 		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1953 
1954 		__startup_pirq(irq);
1955 	}
1956 }
1957 
1958 static void restore_cpu_virqs(unsigned int cpu)
1959 {
1960 	struct evtchn_bind_virq bind_virq;
1961 	evtchn_port_t evtchn;
1962 	struct irq_info *info;
1963 	int virq, irq;
1964 
1965 	for (virq = 0; virq < NR_VIRQS; virq++) {
1966 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1967 			continue;
1968 		info = info_for_irq(irq);
1969 
1970 		BUG_ON(virq_from_irq(info) != virq);
1971 
1972 		/* Get a new binding from Xen. */
1973 		bind_virq.virq = virq;
1974 		bind_virq.vcpu = xen_vcpu_nr(cpu);
1975 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1976 						&bind_virq) != 0)
1977 			BUG();
1978 		evtchn = bind_virq.port;
1979 
1980 		/* Record the new mapping. */
1981 		xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1982 		/* The affinity mask is still valid */
1983 		bind_evtchn_to_cpu(info, cpu, false);
1984 	}
1985 }
1986 
1987 static void restore_cpu_ipis(unsigned int cpu)
1988 {
1989 	struct evtchn_bind_ipi bind_ipi;
1990 	evtchn_port_t evtchn;
1991 	struct irq_info *info;
1992 	int ipi, irq;
1993 
1994 	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1995 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1996 			continue;
1997 		info = info_for_irq(irq);
1998 
1999 		BUG_ON(ipi_from_irq(info) != ipi);
2000 
2001 		/* Get a new binding from Xen. */
2002 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
2003 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
2004 						&bind_ipi) != 0)
2005 			BUG();
2006 		evtchn = bind_ipi.port;
2007 
2008 		/* Record the new mapping. */
2009 		xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
2010 		/* The affinity mask is still valid */
2011 		bind_evtchn_to_cpu(info, cpu, false);
2012 	}
2013 }
2014 
2015 /* Clear an irq's pending state, in preparation for polling on it */
2016 void xen_clear_irq_pending(int irq)
2017 {
2018 	struct irq_info *info = info_for_irq(irq);
2019 	evtchn_port_t evtchn = info ? info->evtchn : 0;
2020 
2021 	if (VALID_EVTCHN(evtchn))
2022 		event_handler_exit(info);
2023 }
2024 EXPORT_SYMBOL(xen_clear_irq_pending);
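
/* Mark an irq's event channel as pending, as if the event had been raised. */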
2025 void xen_set_irq_pending(int irq)
2026 {
2027 	evtchn_port_t evtchn = evtchn_from_irq(irq);
2028 
2029 	if (VALID_EVTCHN(evtchn))
2030 		set_evtchn(evtchn);
2031 }
2032 
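/* Test whether an irq's event channel is currently pending. */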
2033 bool xen_test_irq_pending(int irq)
2034 {
2035 	evtchn_port_t evtchn = evtchn_from_irq(irq);
2036 	bool ret = false;
2037 
2038 	if (VALID_EVTCHN(evtchn))
2039 		ret = test_evtchn(evtchn);
2040 
2041 	return ret;
2042 }
2043 
2044 /* Poll waiting for an irq to become pending with timeout.  In the usual case,
2045  * the irq will be disabled so it won't deliver an interrupt. */
2046 void xen_poll_irq_timeout(int irq, u64 timeout)
2047 {
2048 	evtchn_port_t evtchn = evtchn_from_irq(irq);
2049 
2050 	if (VALID_EVTCHN(evtchn)) {
2051 		struct sched_poll poll;
2052 
2053 		poll.nr_ports = 1;
2054 		poll.timeout = timeout;
2055 		set_xen_guest_handle(poll.ports, &evtchn);
2056 
2057 		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
2058 			BUG();
2059 	}
2060 }
2061 EXPORT_SYMBOL(xen_poll_irq_timeout);

2062 /* Poll waiting for an irq to become pending.  In the usual case, the
2063  * irq will be disabled so it won't deliver an interrupt. */
2064 void xen_poll_irq(int irq)
2065 {
2066 	xen_poll_irq_timeout(irq, 0 /* no timeout */);
2067 }
2068 
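/*
 * Usage sketch (hypothetical caller, not taken from this file): clear the
 * pending state, re-check the wake-up condition, and only then block, so
 * an event arriving in between is not lost.  wakeup_condition_met() stands
 * in for whatever condition the caller is actually waiting on:
 *
 *	xen_clear_irq_pending(irq);
 *	if (!wakeup_condition_met())
 *		xen_poll_irq(irq);
 */
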
2069 /* Check whether the IRQ line is shared with other guests. */
2070 int xen_test_irq_shared(int irq)
2071 {
2072 	struct irq_info *info = info_for_irq(irq);
2073 	struct physdev_irq_status_query irq_status;
2074 
2075 	if (WARN_ON(!info))
2076 		return -ENOENT;
2077 
2078 	irq_status.irq = info->u.pirq.pirq;
2079 
2080 	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
2081 		return 0;
2082 	return !(irq_status.flags & XENIRQSTAT_shared);
2083 }
2084 EXPORT_SYMBOL_GPL(xen_test_irq_shared);
2085 
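/*
 * Zap stale event-channel state and rebind VIRQs, IPIs and PIRQs after
 * a suspend/resume cycle.
 */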
2086 void xen_irq_resume(void)
2087 {
2088 	unsigned int cpu;
2089 	struct irq_info *info;
2090 
2091 	/* New event-channel space is not 'live' yet. */
2092 	xen_evtchn_resume();
2093 
2094 	/* No IRQ <-> event-channel mappings. */
2095 	list_for_each_entry(info, &xen_irq_list_head, list) {
2096 		/* Zap event-channel binding */
2097 		info->evtchn = 0;
2098 		/* Adjust accounting */
2099 		channels_on_cpu_dec(info);
2100 	}
2101 
2102 	clear_evtchn_to_irq_all();
2103 
2104 	for_each_possible_cpu(cpu) {
2105 		restore_cpu_virqs(cpu);
2106 		restore_cpu_ipis(cpu);
2107 	}
2108 
2109 	restore_pirqs();
2110 }
2111 
2112 static struct irq_chip xen_dynamic_chip __read_mostly = {
2113 	.name			= "xen-dyn",
2114 
2115 	.irq_disable		= disable_dynirq,
2116 	.irq_mask		= disable_dynirq,
2117 	.irq_unmask		= enable_dynirq,
2118 
2119 	.irq_ack		= ack_dynirq,
2120 	.irq_mask_ack		= mask_ack_dynirq,
2121 
2122 	.irq_set_affinity	= set_affinity_irq,
2123 	.irq_retrigger		= retrigger_dynirq,
2124 };
2125 
2126 static struct irq_chip xen_lateeoi_chip __read_mostly = {
2127 	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
2128 	.name			= "xen-dyn-lateeoi",
2129 
2130 	.irq_disable		= disable_dynirq,
2131 	.irq_mask		= disable_dynirq,
2132 	.irq_unmask		= enable_dynirq,
2133 
2134 	.irq_ack		= lateeoi_ack_dynirq,
2135 	.irq_mask_ack		= lateeoi_mask_ack_dynirq,
2136 
2137 	.irq_set_affinity	= set_affinity_irq,
2138 	.irq_retrigger		= retrigger_dynirq,
2139 };
2140 
2141 static struct irq_chip xen_pirq_chip __read_mostly = {
2142 	.name			= "xen-pirq",
2143 
2144 	.irq_startup		= startup_pirq,
2145 	.irq_shutdown		= shutdown_pirq,
2146 	.irq_enable		= enable_pirq,
2147 	.irq_disable		= disable_pirq,
2148 
2149 	.irq_mask		= disable_dynirq,
2150 	.irq_unmask		= enable_dynirq,
2151 
2152 	.irq_ack		= eoi_pirq,
2153 	.irq_eoi		= eoi_pirq,
2154 	.irq_mask_ack		= mask_ack_pirq,
2155 
2156 	.irq_set_affinity	= set_affinity_irq,
2157 
2158 	.irq_retrigger		= retrigger_dynirq,
2159 };
2160 
2161 static struct irq_chip xen_percpu_chip __read_mostly = {
2162 	.name			= "xen-percpu",
2163 
2164 	.irq_disable		= disable_dynirq,
2165 	.irq_mask		= disable_dynirq,
2166 	.irq_unmask		= enable_dynirq,
2167 
2168 	.irq_ack		= ack_dynirq,
2169 };
2170 
2171 #ifdef CONFIG_X86
2172 #ifdef CONFIG_XEN_PVHVM
2173 /* Vector callbacks are better than PCI interrupts to receive event
2174  * channel notifications because we can receive vector callbacks on any
2175  * vcpu and we don't need PCI support or APIC interactions. */
2176 void xen_setup_callback_vector(void)
2177 {
2178 	uint64_t callback_via;
2179 
2180 	if (xen_have_vector_callback) {
2181 		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
2182 		if (xen_set_callback_via(callback_via)) {
2183 			pr_err("Request for Xen HVM callback vector failed\n");
2184 			xen_have_vector_callback = false;
2185 		}
2186 	}
2187 }
2188 
2189 /*
2190  * Set up per-vCPU vector-type callbacks. If this setup is unavailable,
2191  * fall back to the global vector-type callback.
2192  */
2193 static __init void xen_init_setup_upcall_vector(void)
2194 {
2195 	if (!xen_have_vector_callback)
2196 		return;
2197 
2198 	if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
2199 	    !xen_set_upcall_vector(0))
2200 		xen_percpu_upcall = true;
2201 	else if (xen_feature(XENFEAT_hvm_callback_vector))
2202 		xen_setup_callback_vector();
2203 	else
2204 		xen_have_vector_callback = false;
2205 }
2206 
2207 int xen_set_upcall_vector(unsigned int cpu)
2208 {
2209 	int rc;
2210 	xen_hvm_evtchn_upcall_vector_t op = {
2211 		.vector = HYPERVISOR_CALLBACK_VECTOR,
2212 		.vcpu = per_cpu(xen_vcpu_id, cpu),
2213 	};
2214 
2215 	rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
2216 	if (rc)
2217 		return rc;
2218 
2219 	/* Trick the toolstack into thinking we are enlightened. */
2220 	if (!cpu)
2221 		rc = xen_set_callback_via(1);
2222 
2223 	return rc;
2224 }
2225 
2226 static __init void xen_alloc_callback_vector(void)
2227 {
2228 	if (!xen_have_vector_callback)
2229 		return;
2230 
2231 	pr_info("Xen HVM callback vector for event delivery is enabled\n");
2232 	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
2233 }
2234 #else
2235 void xen_setup_callback_vector(void) {}
2236 static inline void xen_init_setup_upcall_vector(void) {}
2237 int xen_set_upcall_vector(unsigned int cpu) { return 0; }
2238 static inline void xen_alloc_callback_vector(void) {}
2239 #endif /* CONFIG_XEN_PVHVM */
2240 #endif /* CONFIG_X86 */
2241 
2242 bool xen_fifo_events = true;
2243 module_param_named(fifo_events, xen_fifo_events, bool, 0);
2244 
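/*
 * CPU hotplug callbacks: set up / tear down per-cpu state used by the
 * event channel ABI (the FIFO ABI keeps per-cpu control blocks).
 */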
2245 static int xen_evtchn_cpu_prepare(unsigned int cpu)
2246 {
2247 	int ret = 0;
2248 
2249 	xen_cpu_init_eoi(cpu);
2250 
2251 	if (evtchn_ops->percpu_init)
2252 		ret = evtchn_ops->percpu_init(cpu);
2253 
2254 	return ret;
2255 }
2256 
2257 static int xen_evtchn_cpu_dead(unsigned int cpu)
2258 {
2259 	int ret = 0;
2260 
2261 	if (evtchn_ops->percpu_deinit)
2262 		ret = evtchn_ops->percpu_deinit(cpu);
2263 
2264 	return ret;
2265 }
2266 
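/*
 * Select the event channel ABI (FIFO if enabled and supported, 2-level
 * otherwise), allocate the evtchn -> irq translation table and set up
 * event delivery for this domain.
 */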
2267 void __init xen_init_IRQ(void)
2268 {
2269 	int ret = -EINVAL;
2270 	evtchn_port_t evtchn;
2271 
2272 	if (xen_fifo_events)
2273 		ret = xen_evtchn_fifo_init();
2274 	if (ret < 0) {
2275 		xen_evtchn_2l_init();
2276 		xen_fifo_events = false;
2277 	}
2278 
2279 	xen_cpu_init_eoi(smp_processor_id());
2280 
2281 	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
2282 				  "xen/evtchn:prepare",
2283 				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
2284 
2285 	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
2286 				sizeof(*evtchn_to_irq), GFP_KERNEL);
2287 	BUG_ON(!evtchn_to_irq);
2288 
2289 	/* No event channels are 'live' right now. */
2290 	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
2291 		mask_evtchn(evtchn);
2292 
2293 	pirq_needs_eoi = pirq_needs_eoi_flag;
2294 
2295 #ifdef CONFIG_X86
2296 	if (xen_pv_domain()) {
2297 		if (xen_initial_domain())
2298 			pci_xen_initial_domain();
2299 	}
2300 	xen_init_setup_upcall_vector();
2301 	xen_alloc_callback_vector();
2302 
2304 	if (xen_hvm_domain()) {
2305 		native_init_IRQ();
2306 		/* pci_xen_hvm_init must be called after native_init_IRQ so that
2307 		 * __acpi_register_gsi can point at the right function */
2308 		pci_xen_hvm_init();
2309 	} else {
2310 		int rc;
2311 		struct physdev_pirq_eoi_gmfn eoi_gmfn;
2312 
2313 		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
2314 		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
2315 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
2316 		if (rc != 0) {
2317 			free_page((unsigned long) pirq_eoi_map);
2318 			pirq_eoi_map = NULL;
2319 		} else
2320 			pirq_needs_eoi = pirq_check_eoi_map;
2321 	}
2322 #endif
2323 }
2324