/* Excerpts from the Xen event channel core, drivers/xen/events/events_base.c. */
// SPDX-License-Identifier: GPL-2.0-only

/*
 * Xen domains get a limited number of event channels, so the kernel
 * must dynamically map irqs <-> event channels. The event channels
 * can carry four kinds of events:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain.
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 */

#include <xen/xen-ops.h>
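/*
 * For orientation, a minimal consumer-side sketch (not part of this file):
 * a front-end typically binds an inter-domain event channel to a Linux irq
 * with the helpers exported here. Everything except the bind/unbind/notify
 * APIs below is made up for the example.
 */
#if 0	/* illustrative only */
#include <xen/events.h>

static irqreturn_t my_frontend_interrupt(int irq, void *dev_id)
{
	/* Kick the request/response ring processing here. */
	return IRQ_HANDLED;
}

static int my_frontend_connect(evtchn_port_t evtchn, void *priv)
{
	/* Map the remote event channel onto a dynamically allocated irq. */
	int irq = bind_evtchn_to_irqhandler(evtchn, my_frontend_interrupt,
					    0, "my-frontend", priv);
	if (irq < 0)
		return irq;
	/* Later, notify_remote_via_irq(irq) pokes the backend, and
	 * unbind_from_irqhandler(irq, priv) tears the binding down. */
	return 0;
}
#endif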
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
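/*
 * Illustrative sketch (condensed, not the real declaration): the comment
 * above describes roughly this shape, with the type-specific index kept in
 * a union selected by @type.
 */
#if 0	/* illustrative only */
struct irq_info_sketch {
	enum xen_irq_type type;		/* IRQT_VIRQ, IRQT_IPI, ... */
	evtchn_port_t evtchn;		/* bound event channel, 0 if none */
	unsigned short cpu;		/* cpu the channel is bound to */
	union {				/* type-specific index */
		unsigned short virq;
		enum ipi_vector ipi;
		struct { unsigned short pirq, gsi; } pirq;
	} u;
};
#endif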
/* in struct irq_info */
	unsigned short eoi_cpu;	/* EOI must happen on this cpu-1 */

/*
 * This lock protects updates to the following mapping and reference-counts.
 */

/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
 *   IRQ-desc lock
 *     percpu eoi_list_lock
 *       irq_info->lock
 */
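/*
 * Sketch of the ordering rule above (illustrative, not a real code path):
 * any path needing both the mapping lock and a per-channel lock must take
 * them outermost-first.
 */
#if 0	/* illustrative only */
static void example_update(struct irq_info *info)
{
	mutex_lock(&irq_mapping_update_lock);	/* outermost */
	raw_spin_lock_irq(&info->lock);		/* innermost */
	/* ... update the evtchn <-> irq mapping and info state ... */
	raw_spin_unlock_irq(&info->lock);
	mutex_unlock(&irq_mapping_update_lock);
}
#endif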
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};

#define VALID_EVTCHN(chn)	((chn) != 0)
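/*
 * Usage sketch (illustrative): the per-cpu tables above turn a (cpu, virq)
 * or (cpu, ipi) pair back into the Linux irq it was bound to; -1 means
 * "not bound". VIRQ_TIMER is a real VIRQ number, the helper is made up.
 */
#if 0	/* illustrative only */
static int timer_irq_for(unsigned int cpu)
{
	int irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER];

	return irq;	/* -1 if this cpu never bound its timer VIRQ */
}
#endif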
/* in clear_evtchn_to_irq_row() */
		WRITE_ONCE(evtchn_row[col], -1);

/* in set_evtchn_to_irq() */
	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;
	...
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;
	...
			return -ENOMEM;
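/*
 * Illustrative sketch of the two-level evtchn -> irq table these helpers
 * maintain: a top-level array of page-sized rows, allocated lazily so a
 * large port space costs no memory up front. Row/column math shown with a
 * made-up macro name.
 */
#if 0	/* illustrative only */
	#define COLS_PER_ROW	(PAGE_SIZE / sizeof(int))
	int *row = evtchn_to_irq[evtchn / COLS_PER_ROW];	/* may be NULL */
	int irq = row ? READ_ONCE(row[evtchn % COLS_PER_ROW]) : -1;
#endif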
/* in channels_on_cpu_dec() */
	if (!info->is_accounted)
		return;

	info->is_accounted = 0;

	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
		return;

	WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));

/* in channels_on_cpu_inc() */
	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
		return;

	if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
					    INT_MAX)))
		return;

	info->is_accounted = 1;

/* in delayed_free_irq() */
	unsigned int irq = info->irq;

/* in xen_irq_info_common_setup() */
	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->evtchn = evtchn;
	info->cpu = cpu;
	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
	raw_spin_lock_init(&info->lock);

	ret = set_evtchn_to_irq(evtchn, info->irq);
	...
	irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);

/* in xen_irq_info_evtchn_setup() */
	info->u.interdomain = dev;
	if (dev)
		atomic_inc(&dev->event_channels);

/* in xen_irq_info_ipi_setup() */
	info->u.ipi = ipi;
	per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;

/* in xen_irq_info_virq_setup() */
	info->u.virq = virq;
	per_cpu(virq_to_irq, cpu)[virq] = info->irq;

/* in xen_irq_info_pirq_setup() */
	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

/* in xen_irq_info_cleanup() */
	set_evtchn_to_irq(info->evtchn, -1);
	xen_evtchn_port_remove(info->evtchn, info->cpu);
	info->evtchn = 0;

/* in evtchn_from_irq() */
	return info->evtchn;

/* in irq_from_evtchn() */
	return info ? info->irq : -1;

/* in ipi_from_irq() */
	BUG_ON(info->type != IRQT_IPI);
	return info->u.ipi;

/* in virq_from_irq() */
	BUG_ON(info->type != IRQT_VIRQ);
	return info->u.virq;

/* in pirq_from_irq() */
	BUG_ON(info->type != IRQT_PIRQ);
	return info->u.pirq.pirq;

/* in cpu_from_evtchn() */
	return info ? info->cpu : 0;
/* in do_mask() */
	raw_spin_lock_irqsave(&info->lock, flags);

	if (!info->mask_reason)
		mask_evtchn(info->evtchn);

	info->mask_reason |= reason;

	raw_spin_unlock_irqrestore(&info->lock, flags);

/* in do_unmask() */
	raw_spin_lock_irqsave(&info->lock, flags);

	info->mask_reason &= ~reason;

	if (!info->mask_reason)
		unmask_evtchn(info->evtchn);

	raw_spin_unlock_irqrestore(&info->lock, flags);
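/*
 * The mask_reason bitmask lets several independent reasons (explicit mask,
 * temporary mask around migration, pending EOI) keep a channel masked; the
 * port is only really unmasked once the last reason is cleared.
 * Illustrative use (EVT_MASK_REASON_* names are from this file):
 */
#if 0	/* illustrative only */
	do_mask(info, EVT_MASK_REASON_TEMPORARY);	/* masks the port */
	do_mask(info, EVT_MASK_REASON_EOI_PENDING);	/* still masked */
	do_unmask(info, EVT_MASK_REASON_TEMPORARY);	/* still masked */
	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);	/* now unmasked */
#endif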
/* in pirq_needs_eoi_flag() */
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;

/* in bind_evtchn_to_cpu() */
	struct irq_data *data = irq_get_irq_data(info->irq);
	...
	xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
	...
	info->cpu = cpu;

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 */
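/*
 * Usage sketch (illustrative): a split driver typically queues work in a
 * shared ring and then uses notify_remote_via_irq() to signal the peer.
 * The ring type and helper below stand in for whatever protocol the
 * driver actually uses.
 */
#if 0	/* illustrative only */
static void submit_request(struct my_ring *ring, int irq)
{
	/* ... put a request on the shared ring, update producer index ... */
	wmb();				/* peer must see the request first */
	notify_remote_via_irq(irq);	/* fires the bound event channel */
}
#endif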
/* in lateeoi_list_del() */
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	...
	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
	list_del_init(&info->eoi_list);
	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);

/* in lateeoi_list_add() */
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	...
	if (now < info->eoi_time)
		delay = info->eoi_time - now;
	...
	spin_lock_irqsave(&eoi->eoi_list_lock, flags);

	elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
					eoi_list);
	if (!elem || info->eoi_time < elem->eoi_time) {
		list_add(&info->eoi_list, &eoi->eoi_list);
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, delay);
	} else {
		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
			if (elem->eoi_time <= info->eoi_time)
				break;
		}
		list_add(&info->eoi_list, &elem->eoi_list);
	}

	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
/* in xen_irq_lateeoi_locked() */
	evtchn = info->evtchn;
	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
		return;

	if (spurious) {
		struct xenbus_device *dev = info->u.interdomain;
		unsigned int threshold = 1;

		if (dev && dev->spurious_threshold)
			threshold = dev->spurious_threshold;

		if ((1 << info->spurious_cnt) < (HZ << 2)) {
			if (info->spurious_cnt != 0xFF)
				info->spurious_cnt++;
		}
		if (info->spurious_cnt > threshold) {
			delay = 1 << (info->spurious_cnt - 1 - threshold);
			if (delay > HZ)
				delay = HZ;
			if (!info->eoi_time)
				info->eoi_cpu = smp_processor_id();
			info->eoi_time = get_jiffies_64() + delay;
			if (dev)
				atomic_add(delay, &dev->jiffies_eoi_delayed);
		}
		if (dev)
			atomic_inc(&dev->spurious_events);
	} else {
		info->spurious_cnt = 0;
	}

	cpu = info->eoi_cpu;
	if (info->eoi_time &&
	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
		lateeoi_list_add(info);
		return;
	}

	info->eoi_time = 0;

	smp_store_release(&info->is_active, 0);
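/*
 * Worked example for the backoff above (illustrative numbers): with the
 * default threshold of 1, the first spurious event gives spurious_cnt = 1
 * and no delay; the second gives delay = 1 << (2 - 1 - 1) = 1 jiffy, then
 * 2, 4, 8, ... jiffies, capped at HZ (one second). The counter stops
 * growing once 1 << spurious_cnt reaches 4 * HZ, and a single genuine
 * event resets spurious_cnt to 0.
 */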
/* in xen_irq_lateeoi_worker() */
	while (true) {
		spin_lock_irqsave(&eoi->eoi_list_lock, flags);

		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
						eoi_list);
		if (info == NULL)
			break;

		if (now < info->eoi_time) {
			mod_delayed_work_on(info->eoi_cpu, system_wq,
					    &eoi->delayed,
					    info->eoi_time - now);
			break;
		}

		list_del_init(&info->eoi_list);
		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);

		info->eoi_time = 0;
		xen_irq_lateeoi_locked(info, false);
	}
	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);

/* in xen_cpu_init_eoi() */
	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
	spin_lock_init(&eoi->eoi_list_lock);
	INIT_LIST_HEAD(&eoi->eoi_list);

/* in xen_irq_init() */
	info->irq = irq;
	info->type = IRQT_UNBOUND;
	info->refcnt = -1;
	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
	...
	INIT_LIST_HEAD(&info->eoi_list);
	list_add_tail(&info->list, &xen_irq_list_head);

/* in xen_allocate_irq_dynamic() */
	int irq = irq_alloc_desc_from(0, -1);

/* in xen_allocate_irq_gsi() */
	irq = irq_alloc_desc_at(gsi, -1);

/* in xen_free_irq() */
	if (!list_empty(&info->eoi_list))
		lateeoi_list_del(info);

	list_del(&info->list);

	WARN_ON(info->refcnt > 0);

	queue_rcu_work(system_wq, &info->rwork);
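/*
 * Note on the RCU-deferred free above: event delivery looks irq_info
 * structures up under rcu_read_lock(), so xen_free_irq() hands the actual
 * free to queue_rcu_work() and lets delayed_free_irq() run only after a
 * grace period, when no CPU can still hold a reference from a lookup.
 */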
/* in event_handler_exit() */
	smp_store_release(&info->is_active, 0);
	clear_evtchn(info->evtchn);

/* in pirq_query_unmask() */
	BUG_ON(info->type != IRQT_PIRQ);
	...
	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;

/* in eoi_pirq() */
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	...
	if (pirq_needs_eoi(data->irq)) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}

/* in __startup_pirq() */
	BUG_ON(info->type != IRQT_PIRQ);
	...
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
						BIND_PIRQ__WILL_SHARE : 0;
	...
	info->evtchn = evtchn;

/* in startup_pirq() */
	return __startup_pirq(data->irq);

/* in shutdown_pirq() */
	unsigned int irq = data->irq;
	...
	BUG_ON(info->type != IRQT_PIRQ);

/* in xen_irq_from_gsi() */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
/* in __unbind_from_irq() */
	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}
	...
	evtchn = info->evtchn;

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = info->cpu;
		struct xenbus_device *dev;

		if (!info->is_static)
			xen_evtchn_close(evtchn);

		switch (info->type) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
			break;
		case IRQT_EVTCHN:
			dev = info->u.interdomain;
			if (dev)
				atomic_dec(&dev->event_channels);
			break;
		default:
			break;
		}
	}

/* in xen_bind_pirq_gsi_to_irq() */
	ret = xen_irq_from_gsi(gsi);
	if (ret != -1) {
		...
		goto out;
	}
	...
	irq_op.irq = info->irq;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the other domain.
	 */
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		ret = -ENOSPC;
		goto out;
	}
	...
	__unbind_from_irq(info, info->irq);
	...
	pirq_query_unmask(info->irq);

	if (shareable)
		irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
					      handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
					      handle_edge_irq, name);

	ret = info->irq;

/* in xen_allocate_pirq_msi() */
	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;

/* in xen_bind_pirq_msi_to_irq() */
	irq = irq_alloc_descs(-1, 0, nvec, -1);
	...
	ret = -ENOMEM;
	...
	while (nvec--) {
		...
	}

/* in xen_destroy_irq() */
	int rc = -ENOENT;
	...
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		...
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
	}

/* in xen_irq_from_pirq() */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
/* in bind_evtchn_to_irq_chip() */
	int ret = -ENOMEM;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;
	...
		irq_set_chip_and_handler_name(info->irq, chip,
					      handle_edge_irq, "event");
	...
			__unbind_from_irq(info, info->irq);
	...
	} else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
		if (shared && !WARN_ON(info->refcnt < 0))
			info->refcnt++;
	}

	ret = info->irq;

/* in bind_ipi_to_irq() */
	if (ret == -1) {
	...
		irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");
	...
			__unbind_from_irq(info, info->irq);
	...
		ret = info->irq;
	} else {
		...
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

/* in bind_interdomain_evtchn_to_irq_chip() */
	bind_interdomain.remote_dom = dev->otherend_id;

/* in find_virq() */
	int rc = -ENOENT;

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 */
	return evtchn_ops->nr_channels();

/* in bind_virq_to_irq() */
	if (ret == -1) {
	...
		if (percpu)
			irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");
	...
		if (ret == -EEXIST)
			...
	...
			__unbind_from_irq(info, info->irq);
	...
		ret = info->irq;
	} else {
		...
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

/**
 * xen_set_irq_priority() - set an event channel priority.
 */
/* in evtchn_make_refcounted() */
	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;
	info->is_static = is_static;

/* in evtchn_get() */
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;
	...
	err = -EINVAL;
	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
		goto done;

	info->refcnt++;
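/*
 * Usage sketch (illustrative): evtchn_get()/evtchn_put() let a second user
 * (e.g. the /dev/xen/evtchn driver) pin a refcounted channel so it cannot
 * be unbound underneath them. The channel must first have been made
 * refcounted via evtchn_make_refcounted().
 */
#if 0	/* illustrative only */
	if (evtchn_get(evtchn))		/* takes a reference, or fails */
		return -ENOENT;
	/* ... safely use the bound irq ... */
	evtchn_put(evtchn);		/* drops it; the last put unbinds */
#endif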
/* in evtchn_put() */
	unbind_from_irq(info->irq);

/* in handle_irq_for_port() */
	/* Check for timeout every 256 events. */
	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
		ktime_t kt = ktime_get();

		if (!ctrl->timeout) {
			kt = ktime_add_ms(kt,
					  jiffies_to_msecs(event_loop_timeout));
			ctrl->timeout = kt;
		} else if (kt > ctrl->timeout) {
			ctrl->defer_eoi = true;
		}
	}

	if (xchg_acquire(&info->is_active, 1))
		return;

	dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
	if (dev)
		atomic_inc(&dev->events);

	if (ctrl->defer_eoi) {
		info->eoi_cpu = smp_processor_id();
		info->irq_epoch = __this_cpu_read(irq_epoch);
		info->eoi_time = get_jiffies_64() + event_eoi_delay;
	}

	generic_handle_irq(info->irq);
/* in xen_evtchn_do_upcall() */
	int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
	...
	do {
		vcpu_info->evtchn_upcall_pending = 0;
		...
	} while (vcpu_info->evtchn_upcall_pending);
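/*
 * Usage sketch (illustrative): backends that must survive event storms
 * from untrusted frontends use the lateeoi flavour of binding and only
 * re-enable the channel by signalling EOI once the request has really
 * been handled. The handler body and process_ring() are made up.
 */
#if 0	/* illustrative only */
static irqreturn_t my_backend_interrupt(int irq, void *dev_id)
{
	bool did_work = process_ring(dev_id);	/* hypothetical helper */

	/* Delayed EOI: flag spurious wakeups so the core can throttle them. */
	xen_irq_lateeoi(irq, did_work ? 0 : XEN_EOI_FLAG_SPURIOUS);
	return IRQ_HANDLED;
}

/* bound with bind_interdomain_evtchn_to_irqhandler_lateeoi(...) */
#endif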
/* in rebind_evtchn_irq() */
	/* After resume the irq <-> evtchn mappings are all cleared out */
	...
	BUG_ON(info->type == IRQT_UNBOUND);

	info->irq = irq;
	...
	bind_evtchn_to_cpu(info, info->cpu, false);

/* in xen_rebind_evtchn_to_cpu() */
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;
	...
	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */

/* in set_affinity_irq() */
	ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);

/* in enable_dynirq() */
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

/* in disable_dynirq() */
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

/* in ack_dynirq() */
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

/* in lateeoi_ack_dynirq() */
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;
	...
		/*
		 * Need to keep is_active non-zero in order to ignore re-raised
		 * events after cpu affinity changes while a lateeoi is pending.
		 */
		clear_evtchn(evtchn);

/* in lateeoi_mask_ack_dynirq() */
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

/* in retrigger_dynirq() */
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

/* in restore_pirqs() */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;
		...
		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
	}

/* in restore_cpu_virqs() */
	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
		continue;

/* in restore_cpu_ipis() */
	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
		continue;

/* in xen_clear_irq_pending() */
	evtchn_port_t evtchn = info ? info->evtchn : 0;

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */

/* in xen_test_irq_shared() */
	if (!info)
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

/* in xen_irq_resume() */
	/* New event-channel space is not 'live' yet. */
	...
	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		/* Zap event-channel binding */
		info->evtchn = 0;
	}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",
	...
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
	.name			= "xen-dyn-lateeoi",
	...
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",
	...
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",
	...
};

/*
 * Setup per-vCPU vector-type callbacks. If this setup is unavailable,
 * fall back to the global vector-type callback.
 */
/* in xen_evtchn_cpu_prepare() */
	if (evtchn_ops->percpu_init)
		ret = evtchn_ops->percpu_init(cpu);

/* in xen_evtchn_cpu_dead() */
	if (evtchn_ops->percpu_deinit)
		ret = evtchn_ops->percpu_deinit(cpu);

/* in xen_init_IRQ() */
	int ret = -EINVAL;