/*
 * QEMU Xen emulation: Event channel support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "monitor/hmp.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qmp/qdict.h"
#include "qom/object.h"
#include "exec/target_page.h"
#include "exec/address-spaces.h"
#include "migration/vmstate.h"
#include "trace.h"

#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "hw/i386/x86.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/irq.h"
#include "hw/xen/xen_backend_ops.h"

#include "xen_evtchn.h"
#include "xen_overlay.h"
#include "xen_xenstore.h"

#include "sysemu/kvm.h"
#include "sysemu/kvm_xen.h"
#include <linux/kvm.h>
#include <sys/eventfd.h>

#include "hw/xen/interface/memory.h"
#include "hw/xen/interface/hvm/params.h"

/* XX: For kvm_update_msi_routes_all() */
#include "target/i386/kvm/kvm_i386.h"

#define TYPE_XEN_EVTCHN "xen-evtchn"
OBJECT_DECLARE_SIMPLE_TYPE(XenEvtchnState, XEN_EVTCHN)

typedef struct XenEvtchnPort {
    uint32_t vcpu;      /* Xen/ACPI vcpu_id */
    uint16_t type;      /* EVTCHNSTAT_xxxx */
    union {
        uint16_t val;  /* raw value for serialization etc. */
        uint16_t pirq;
        uint16_t virq;
        struct {
            uint16_t port:15;
            uint16_t to_qemu:1; /* Only two targets; qemu or loopback */
        } interdomain;
    } u;
} XenEvtchnPort;
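
/*
 * Illustrative note: only one union member is meaningful for a given port
 * type, e.g. u.virq for EVTCHNSTAT_virq or u.interdomain for interdomain
 * ports (15 bits of peer port plus the to_qemu flag). The whole union is
 * saved and restored through u.val as a single uint16_t in the vmstate
 * below.
 */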

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
    unsigned int cr2;
    unsigned int pad[5];
};

struct compat_vcpu_info {
    uint8_t evtchn_upcall_pending;
    uint8_t evtchn_upcall_mask;
    uint16_t pad;
    uint32_t evtchn_pending_sel;
    struct compat_arch_vcpu_info arch;
    struct vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
    unsigned int max_pfn;
    unsigned int pfn_to_mfn_frame_list_list;
    unsigned int nmi_reason;
    unsigned int p2m_cr3;
    unsigned int p2m_vaddr;
    unsigned int p2m_generation;
    uint32_t wc_sec_hi;
};

struct compat_shared_info {
    struct compat_vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
    uint32_t evtchn_pending[32];
    uint32_t evtchn_mask[32];
    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
    uint32_t wc_sec;
    uint32_t wc_nsec;
    struct compat_arch_shared_info arch;
};

#define COMPAT_EVTCHN_2L_NR_CHANNELS            1024
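
/*
 * The limit above follows from the layout: 32 words of 32 bits each in
 * compat_shared_info.evtchn_pending[] gives 32 * 32 == 1024 channels for
 * 32-bit guests.
 */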

/* Local private implementation of struct xenevtchn_handle */
struct xenevtchn_handle {
    evtchn_port_t be_port;
    evtchn_port_t guest_port; /* Or zero for unbound */
    int fd;
};

/*
 * These 'emuirq' values are used by Xen in the LM stream... and yes, I am
 * insane enough to think about guest-transparent live migration from actual
 * Xen to QEMU, and ensuring that we can convert/consume the stream.
 */
#define IRQ_UNBOUND -1
#define IRQ_PT -2
#define IRQ_MSI_EMU -3


struct pirq_info {
    int gsi;
    uint16_t port;
    PCIDevice *dev;
    int vector;
    bool is_msix;
    bool is_masked;
    bool is_translated;
};

struct XenEvtchnState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    uint64_t callback_param;
    bool evtchn_in_kernel;
    uint32_t callback_gsi;

    QEMUBH *gsi_bh;

    QemuMutex port_lock;
    uint32_t nr_ports;
    XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];

    /* Connected to the system GSIs for raising callback as GSI / INTx */
    unsigned int nr_callback_gsis;
    qemu_irq *callback_gsis;

    struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS];

    uint32_t nr_pirqs;

    /* Bitmap of allocated PIRQs (serialized) */
    uint16_t nr_pirq_inuse_words;
    uint64_t *pirq_inuse_bitmap;

    /* GSI → PIRQ mapping (serialized) */
    uint16_t gsi_pirq[IOAPIC_NUM_PINS];

    /* Per-GSI assertion state (serialized) */
    uint32_t pirq_gsi_set;

    /* Per-PIRQ information (rebuilt on migration, protected by BQL) */
    struct pirq_info *pirq;
};

#define pirq_inuse_word(s, pirq) (s->pirq_inuse_bitmap[((pirq) / 64)])
#define pirq_inuse_bit(pirq) (1ULL << ((pirq) & 63))

#define pirq_inuse(s, pirq) (pirq_inuse_word(s, pirq) & pirq_inuse_bit(pirq))
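
/*
 * Worked example: PIRQ 100 is tracked in pirq_inuse_bitmap[1] (100 / 64)
 * under bit 36 (100 & 63), so pirq_inuse(s, 100) tests exactly that bit.
 */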

struct XenEvtchnState *xen_evtchn_singleton;

/* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */
#define CALLBACK_VIA_TYPE_SHIFT 56
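
/*
 * For reference, the layout consumed below: bits 63-56 hold the
 * HVM_PARAM_CALLBACK_TYPE_xxx value, and for the PCI_INTX type the low bits
 * encode the target as decoded in set_callback_pci_intx(): domain in bits
 * 47-32, bus in bits 31-16, devfn in bits 15-8 and the INTx pin in bits 1-0.
 */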

static void unbind_backend_ports(XenEvtchnState *s);

static int xen_evtchn_pre_load(void *opaque)
{
    XenEvtchnState *s = opaque;

    /* Unbind all the backend-side ports; they need to rebind */
    unbind_backend_ports(s);

    /* It'll be leaked otherwise. */
    g_free(s->pirq_inuse_bitmap);
    s->pirq_inuse_bitmap = NULL;

    return 0;
}

static int xen_evtchn_post_load(void *opaque, int version_id)
{
    XenEvtchnState *s = opaque;
    uint32_t i;

    if (s->callback_param) {
        xen_evtchn_set_callback_param(s->callback_param);
    }

    /* Rebuild s->pirq[].port mapping */
    for (i = 0; i < s->nr_ports; i++) {
        XenEvtchnPort *p = &s->port_table[i];

        if (p->type == EVTCHNSTAT_pirq) {
            assert(p->u.pirq);
            assert(p->u.pirq < s->nr_pirqs);

            /*
             * Set the gsi to IRQ_UNBOUND; it may be changed to an actual
             * GSI# below, or to IRQ_MSI_EMU when the MSI table snooping
             * catches up with it.
             */
            s->pirq[p->u.pirq].gsi = IRQ_UNBOUND;
            s->pirq[p->u.pirq].port = i;
        }
    }
    /* Rebuild s->pirq[].gsi mapping */
    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        if (s->gsi_pirq[i]) {
            s->pirq[s->gsi_pirq[i]].gsi = i;
        }
    }
    return 0;
}

static bool xen_evtchn_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}

static const VMStateDescription xen_evtchn_port_vmstate = {
    .name = "xen_evtchn_port",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(vcpu, XenEvtchnPort),
        VMSTATE_UINT16(type, XenEvtchnPort),
        VMSTATE_UINT16(u.val, XenEvtchnPort),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription xen_evtchn_vmstate = {
    .name = "xen_evtchn",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_evtchn_is_needed,
    .pre_load = xen_evtchn_pre_load,
    .post_load = xen_evtchn_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(callback_param, XenEvtchnState),
        VMSTATE_UINT32(nr_ports, XenEvtchnState),
        VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1,
                                     xen_evtchn_port_vmstate, XenEvtchnPort),
        VMSTATE_UINT16_ARRAY(gsi_pirq, XenEvtchnState, IOAPIC_NUM_PINS),
        VMSTATE_VARRAY_UINT16_ALLOC(pirq_inuse_bitmap, XenEvtchnState,
                                    nr_pirq_inuse_words, 0,
                                    vmstate_info_uint64, uint64_t),
        VMSTATE_UINT32(pirq_gsi_set, XenEvtchnState),
        VMSTATE_END_OF_LIST()
    }
};

static void xen_evtchn_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &xen_evtchn_vmstate;
}

static const TypeInfo xen_evtchn_info = {
    .name          = TYPE_XEN_EVTCHN,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenEvtchnState),
    .class_init    = xen_evtchn_class_init,
};

static struct evtchn_backend_ops emu_evtchn_backend_ops = {
    .open = xen_be_evtchn_open,
    .bind_interdomain = xen_be_evtchn_bind_interdomain,
    .unbind = xen_be_evtchn_unbind,
    .close = xen_be_evtchn_close,
    .get_fd = xen_be_evtchn_fd,
    .notify = xen_be_evtchn_notify,
    .unmask = xen_be_evtchn_unmask,
    .pending = xen_be_evtchn_pending,
};

static void gsi_assert_bh(void *opaque)
{
    struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
    if (vi) {
        xen_evtchn_set_callback_level(!!vi->evtchn_upcall_pending);
    }
}

void xen_evtchn_create(unsigned int nr_gsis, qemu_irq *system_gsis)
{
    XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN,
                                                        -1, NULL));
    int i;

    xen_evtchn_singleton = s;

    qemu_mutex_init(&s->port_lock);
    s->gsi_bh = aio_bh_new(qemu_get_aio_context(), gsi_assert_bh, s);

    /*
     * These are the *output* GSI from event channel support, for
     * signalling CPU0's events via GSI or PCI INTx instead of the
     * per-CPU vector. We create a *set* of irqs and connect one to
     * each of the system GSIs which were passed in from the platform
     * code, and then just trigger the right one as appropriate from
     * xen_evtchn_set_callback_level().
     */
    s->nr_callback_gsis = nr_gsis;
    s->callback_gsis = g_new0(qemu_irq, nr_gsis);
    for (i = 0; i < nr_gsis; i++) {
        sysbus_init_irq(SYS_BUS_DEVICE(s), &s->callback_gsis[i]);
        sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]);
    }

    /*
     * The Xen scheme for encoding PIRQ# into an MSI message is not
     * compatible with 32-bit MSI, as it puts the high bits of the
     * PIRQ# into the high bits of the MSI message address, instead of
     * using the Extended Destination ID in address bits 4-11 which
     * perhaps would have been a better choice.
     *
     * To keep life simple, kvm_accel_instance_init() initialises the
     * default to 256, which conveniently doesn't need to set anything
     * outside the low 32 bits of the address. It can be increased by
     * setting the xen-evtchn-max-pirq property.
     */
    s->nr_pirqs = kvm_xen_get_evtchn_max_pirq();

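    /*
     * With the default of 256 PIRQs this allocates a 4-word in-use bitmap
     * (DIV_ROUND_UP(256, 64)) and 256 pirq_info slots.
     */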
    s->nr_pirq_inuse_words = DIV_ROUND_UP(s->nr_pirqs, 64);
    s->pirq_inuse_bitmap = g_new0(uint64_t, s->nr_pirq_inuse_words);
    s->pirq = g_new0(struct pirq_info, s->nr_pirqs);

    /* Set event channel functions for backend drivers to use */
    xen_evtchn_ops = &emu_evtchn_backend_ops;
}

static void xen_evtchn_register_types(void)
{
    type_register_static(&xen_evtchn_info);
}

type_init(xen_evtchn_register_types)

static int set_callback_pci_intx(XenEvtchnState *s, uint64_t param)
{
    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
    uint8_t pin = param & 3;
    uint8_t devfn = (param >> 8) & 0xff;
    uint16_t bus = (param >> 16) & 0xffff;
    uint16_t domain = (param >> 32) & 0xffff;
    PCIDevice *pdev;
    PCIINTxRoute r;

    if (domain || !pcms) {
        return 0;
    }

    pdev = pci_find_device(pcms->pcibus, bus, devfn);
    if (!pdev) {
        return 0;
    }

    r = pci_device_route_intx_to_irq(pdev, pin);
    if (r.mode != PCI_INTX_ENABLED) {
        return 0;
    }

    /*
     * Hm, can we be notified of INTX routing changes? Not without
     * *owning* the device and being allowed to overwrite its own
     * ->intx_routing_notifier, AFAICT. So let's not.
     */
    return r.irq;
}

void xen_evtchn_set_callback_level(int level)
{
    XenEvtchnState *s = xen_evtchn_singleton;
    if (!s) {
        return;
    }

    /*
     * We get to this function in a number of ways:
     *
     *  • From I/O context, via PV backend drivers sending a notification to
     *    the guest.
     *
     *  • From guest vCPU context, via loopback interdomain event channels
     *    (or theoretically even IPIs but guests don't use those with GSI
     *    delivery because that's pointless. We don't want a malicious guest
     *    to be able to trigger a deadlock though, so we can't rule it out.)
     *
     *  • From guest vCPU context when the HVM_PARAM_CALLBACK_IRQ is being
     *    configured.
     *
     *  • From guest vCPU context in the KVM exit handler, if the upcall
     *    pending flag has been cleared and the GSI needs to be deasserted.
     *
     *  • Maybe in future, in an interrupt ack/eoi notifier when the GSI has
     *    been acked in the irqchip.
     *
     * Whichever context we come from, if we aren't already holding the BQL
     * then we can't take it now, as we may already hold s->port_lock. So
     * trigger the BH to set the IRQ for us instead of doing it immediately.
     *
     * In the HVM_PARAM_CALLBACK_IRQ and KVM exit handler cases, the caller
     * will deliberately take the BQL because they want the change to take
     * effect immediately. That just leaves interdomain loopback as the case
     * which uses the BH.
     */
    if (!bql_locked()) {
        qemu_bh_schedule(s->gsi_bh);
        return;
    }

    if (s->callback_gsi && s->callback_gsi < s->nr_callback_gsis) {
        qemu_set_irq(s->callback_gsis[s->callback_gsi], level);
        if (level) {
            /* Ensure the vCPU polls for deassertion */
            kvm_xen_set_callback_asserted();
        }
    }
}

int xen_evtchn_set_callback_param(uint64_t param)
{
    XenEvtchnState *s = xen_evtchn_singleton;
    struct kvm_xen_hvm_attr xa = {
        .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
        .u.vector = 0,
    };
    bool in_kernel = false;
    uint32_t gsi = 0;
    int type = param >> CALLBACK_VIA_TYPE_SHIFT;
    int ret;

    if (!s) {
        return -ENOTSUP;
    }

    /*
     * We need the BQL because set_callback_pci_intx() may call into PCI code,
     * and because we may need to manipulate the old and new GSI levels.
     */
    assert(bql_locked());
    qemu_mutex_lock(&s->port_lock);

    switch (type) {
    case HVM_PARAM_CALLBACK_TYPE_VECTOR: {
        xa.u.vector = (uint8_t)param;

        ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
        if (!ret && kvm_xen_has_cap(EVTCHN_SEND)) {
            in_kernel = true;
        }
        gsi = 0;
        break;
    }

    case HVM_PARAM_CALLBACK_TYPE_PCI_INTX:
        gsi = set_callback_pci_intx(s, param);
        ret = gsi ? 0 : -EINVAL;
        break;

    case HVM_PARAM_CALLBACK_TYPE_GSI:
        gsi = (uint32_t)param;
        ret = 0;
        break;

    default:
        /* Xen doesn't return an error even if you set something bogus */
        ret = 0;
        break;
    }

    /* If the guest has set a per-vCPU callback vector, prefer that. */
    if (gsi && kvm_xen_has_vcpu_callback_vector()) {
        in_kernel = kvm_xen_has_cap(EVTCHN_SEND);
        gsi = 0;
    }

    if (!ret) {
        /* If vector delivery was turned *off* then tell the kernel */
        if ((s->callback_param >> CALLBACK_VIA_TYPE_SHIFT) ==
            HVM_PARAM_CALLBACK_TYPE_VECTOR && !xa.u.vector) {
            kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
        }
        s->callback_param = param;
        s->evtchn_in_kernel = in_kernel;

        if (gsi != s->callback_gsi) {
            struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);

            xen_evtchn_set_callback_level(0);
            s->callback_gsi = gsi;

            if (gsi && vi && vi->evtchn_upcall_pending) {
                kvm_xen_inject_vcpu_callback_vector(0, type);
            }
        }
    }

    qemu_mutex_unlock(&s->port_lock);

    return ret;
}

static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
{
    int type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;

    kvm_xen_inject_vcpu_callback_vector(vcpu, type);
}

static void deassign_kernel_port(evtchn_port_t port)
{
    struct kvm_xen_hvm_attr ha;
    int ret;

    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
    ha.u.evtchn.send_port = port;
    ha.u.evtchn.flags = KVM_XEN_EVTCHN_DEASSIGN;

    ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to unbind kernel port %d: %s\n",
                      port, strerror(ret));
    }
}

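/*
 * Hand a bound port over to the kernel: the KVM_XEN_ATTR_TYPE_EVTCHN
 * attribute below asks KVM to deliver events sent to 'port' to the given
 * vCPU at 2-level priority, rather than leaving delivery to QEMU.
 */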
static int assign_kernel_port(uint16_t type, evtchn_port_t port,
                              uint32_t vcpu_id)
{
    CPUState *cpu = qemu_get_cpu(vcpu_id);
    struct kvm_xen_hvm_attr ha;

    if (!cpu) {
        return -ENOENT;
    }

    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
    ha.u.evtchn.send_port = port;
    ha.u.evtchn.type = type;
    ha.u.evtchn.flags = 0;
    ha.u.evtchn.deliver.port.port = port;
    ha.u.evtchn.deliver.port.vcpu = kvm_arch_vcpu_id(cpu);
    ha.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
}

static int assign_kernel_eventfd(uint16_t type, evtchn_port_t port, int fd)
{
    struct kvm_xen_hvm_attr ha;

    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
    ha.u.evtchn.send_port = port;
    ha.u.evtchn.type = type;
    ha.u.evtchn.flags = 0;
    ha.u.evtchn.deliver.eventfd.port = 0;
    ha.u.evtchn.deliver.eventfd.fd = fd;

    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
}

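/*
 * Port 0 is never valid. The upper bound depends on the guest ABI:
 * EVTCHN_2L_NR_CHANNELS for 64-bit guests, or the 1024 channels of the
 * 32-bit compat shared_info layout defined above.
 */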
static bool valid_port(evtchn_port_t port)
{
    if (!port) {
        return false;
    }

    if (xen_is_long_mode()) {
        return port < EVTCHN_2L_NR_CHANNELS;
    } else {
        return port < COMPAT_EVTCHN_2L_NR_CHANNELS;
    }
}

static bool valid_vcpu(uint32_t vcpu)
{
    return !!qemu_get_cpu(vcpu);
}

static void unbind_backend_ports(XenEvtchnState *s)
{
    XenEvtchnPort *p;
    int i;

    for (i = 1; i < s->nr_ports; i++) {
        p = &s->port_table[i];
        if (p->type == EVTCHNSTAT_interdomain && p->u.interdomain.to_qemu) {
            evtchn_port_t be_port = p->u.interdomain.port;

            if (s->be_handles[be_port]) {
                /* This part will be overwritten on the load anyway. */
                p->type = EVTCHNSTAT_unbound;
                p->u.interdomain.port = 0;

                /* Leave the backend port open and unbound too. */
                if (kvm_xen_has_cap(EVTCHN_SEND)) {
                    deassign_kernel_port(i);
                }
                s->be_handles[be_port]->guest_port = 0;
            }
        }
    }
}

int xen_evtchn_status_op(struct evtchn_status *status)
{
    XenEvtchnState *s = xen_evtchn_singleton;
    XenEvtchnPort *p;

    if (!s) {
        return -ENOTSUP;
    }

    if (status->dom != DOMID_SELF && status->dom != xen_domid) {
        return -ESRCH;
    }

    if (!valid_port(status->port)) {
        return -EINVAL;
    }

    qemu_mutex_lock(&s->port_lock);

    p = &s->port_table[status->port];

    status->status = p->type;
    status->vcpu = p->vcpu;

    switch (p->type) {
    case EVTCHNSTAT_unbound:
        status->u.unbound.dom = p->u.interdomain.to_qemu ? DOMID_QEMU
                                                         : xen_domid;
        break;

    case EVTCHNSTAT_interdomain:
        status->u.interdomain.dom = p->u.interdomain.to_qemu ? DOMID_QEMU
                                                             : xen_domid;
        status->u.interdomain.port = p->u.interdomain.port;
        break;

    case EVTCHNSTAT_pirq:
        status->u.pirq = p->u.pirq;
        break;

    case EVTCHNSTAT_virq:
        status->u.virq = p->u.virq;
        break;
    }

    qemu_mutex_unlock(&s->port_lock);
    return 0;
}

/*
 * Never thought I'd hear myself say this, but C++ templates would be
 * kind of nice here.
 *
 * template<class T> static int do_unmask_port(T *shinfo, ...);
 */
static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
                             bool do_unmask, struct shared_info *shinfo,
                             struct vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;
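
    /* e.g. with the 64-bit layout, port 100 is word 1, bit 36 */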

    mask = 1UL << offset;

    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    if (do_unmask) {
        /*
         * If this is a true unmask operation, clear the mask bit. If
         * it was already unmasked, we have nothing further to do.
         */
        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
            return 0;
        }
    } else {
        /*
         * This is a pseudo-unmask for affinity changes. We don't
         * change the mask bit, and if it's *masked* we have nothing
         * else to do.
         */
        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
            return 0;
        }
    }

    /* If the event was not pending, we're done. */
    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}

static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
                                 bool do_unmask,
                                 struct compat_shared_info *shinfo,
                                 struct compat_vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;

    mask = 1UL << offset;

    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    if (do_unmask) {
        /*
         * If this is a true unmask operation, clear the mask bit. If
         * it was already unmasked, we have nothing further to do.
         */
        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
            return 0;
        }
    } else {
        /*
         * This is a pseudo-unmask for affinity changes. We don't
         * change the mask bit, and if it's *masked* we have nothing
         * else to do.
         */
        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
            return 0;
        }
    }

    /* If the event was not pending, we're done. */
    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}

static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
{
    void *vcpu_info, *shinfo;

    if (s->port_table[port].type == EVTCHNSTAT_closed) {
        return -EINVAL;
    }

    shinfo = xen_overlay_get_shinfo_ptr();
    if (!shinfo) {
        return -ENOTSUP;
    }

    vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
    if (!vcpu_info) {
        return -EINVAL;
    }

    if (xen_is_long_mode()) {
        return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info);
    } else {
        return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info);
    }
}

static int do_set_port_lm(XenEvtchnState *s, evtchn_port_t port,
                          struct shared_info *shinfo,
                          struct vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;

    mask = 1UL << offset;

    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    /* Update the pending bit itself. If it was already set, we're done. */
    if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
        return 0;
    }

    /* Check if it's masked. */
    if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}

static int do_set_port_compat(XenEvtchnState *s, evtchn_port_t port,
                              struct compat_shared_info *shinfo,
                              struct compat_vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;

    mask = 1UL << offset;

    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    /* Update the pending bit itself. If it was already set, we're done. */
    if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
        return 0;
    }

    /* Check if it's masked. */
    if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}

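/*
 * Raise an event on 'port'. If s->evtchn_in_kernel is set, hand delivery
 * off to the kernel with a KVM_XEN_HVM_EVTCHN_SEND ioctl; otherwise set
 * the 2-level pending bits in shared_info/vcpu_info ourselves and inject
 * the upcall if needed.
 */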
static int set_port_pending(XenEvtchnState *s, evtchn_port_t port)
{
    void *vcpu_info, *shinfo;

    if (s->port_table[port].type == EVTCHNSTAT_closed) {
        return -EINVAL;
    }

    if (s->evtchn_in_kernel) {
        XenEvtchnPort *p = &s->port_table[port];
        CPUState *cpu = qemu_get_cpu(p->vcpu);
        struct kvm_irq_routing_xen_evtchn evt;

        if (!cpu) {
            return 0;
        }

        evt.port = port;
        evt.vcpu = kvm_arch_vcpu_id(cpu);
        evt.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

        return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_EVTCHN_SEND, &evt);
    }

    shinfo = xen_overlay_get_shinfo_ptr();
    if (!shinfo) {
        return -ENOTSUP;
    }

    vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
    if (!vcpu_info) {
        return -EINVAL;
    }

    if (xen_is_long_mode()) {
        return do_set_port_lm(s, port, shinfo, vcpu_info);
    } else {
        return do_set_port_compat(s, port, shinfo, vcpu_info);
    }
}

static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port)
{
    void *p = xen_overlay_get_shinfo_ptr();

    if (!p) {
        return -ENOTSUP;
    }

    if (xen_is_long_mode()) {
        struct shared_info *shinfo = p;
        const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
        typeof(shinfo->evtchn_pending[0]) mask;
        int idx = port / bits_per_word;
        int offset = port % bits_per_word;

        mask = 1UL << offset;

        qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask);
    } else {
        struct compat_shared_info *shinfo = p;
        const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
        typeof(shinfo->evtchn_pending[0]) mask;
        int idx = port / bits_per_word;
        int offset = port % bits_per_word;

        mask = 1UL << offset;

        qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask);
    }
    return 0;
}

static void free_port(XenEvtchnState *s, evtchn_port_t port)
{
    s->port_table[port].type = EVTCHNSTAT_closed;
    s->port_table[port].u.val = 0;
    s->port_table[port].vcpu = 0;

    if (s->nr_ports == port + 1) {
        do {
            s->nr_ports--;
        } while (s->nr_ports &&
                 s->port_table[s->nr_ports - 1].type == EVTCHNSTAT_closed);
    }

    /* Clear pending event to avoid unexpected behavior on re-bind. */
    clear_port_pending(s, port);
}

997c723d4c1SDavid Woodhouse static int allocate_port(XenEvtchnState *s, uint32_t vcpu, uint16_t type,
998c723d4c1SDavid Woodhouse                          uint16_t val, evtchn_port_t *port)
999c723d4c1SDavid Woodhouse {
1000c723d4c1SDavid Woodhouse     evtchn_port_t p = 1;
1001c723d4c1SDavid Woodhouse 
1002c723d4c1SDavid Woodhouse     for (p = 1; valid_port(p); p++) {
1003c723d4c1SDavid Woodhouse         if (s->port_table[p].type == EVTCHNSTAT_closed) {
1004c723d4c1SDavid Woodhouse             s->port_table[p].vcpu = vcpu;
1005c723d4c1SDavid Woodhouse             s->port_table[p].type = type;
1006be155098SDavid Woodhouse             s->port_table[p].u.val = val;
1007c723d4c1SDavid Woodhouse 
1008c723d4c1SDavid Woodhouse             *port = p;
1009c723d4c1SDavid Woodhouse 
1010c723d4c1SDavid Woodhouse             if (s->nr_ports < p + 1) {
1011c723d4c1SDavid Woodhouse                 s->nr_ports = p + 1;
1012c723d4c1SDavid Woodhouse             }
1013c723d4c1SDavid Woodhouse 
1014c723d4c1SDavid Woodhouse             return 0;
1015c723d4c1SDavid Woodhouse         }
1016c723d4c1SDavid Woodhouse     }
1017c723d4c1SDavid Woodhouse     return -ENOSPC;
1018c723d4c1SDavid Woodhouse }
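
/*
 * [Editor's annotation, not part of the upstream file] Port 0 is never
 * valid, so the scan above starts at 1 and hands out the lowest free port,
 * while nr_ports tracks a high-water mark of ports ever in use. free_port()
 * above shrinks it again when the topmost ports are closed; e.g. with
 * nr_ports == 6 (ports 1-5 allocated), freeing port 5 while port 4 is
 * already EVTCHNSTAT_closed drops nr_ports back to 4.
 */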
1019c723d4c1SDavid Woodhouse 
1020c723d4c1SDavid Woodhouse static bool virq_is_global(uint32_t virq)
1021c723d4c1SDavid Woodhouse {
1022c723d4c1SDavid Woodhouse     switch (virq) {
1023c723d4c1SDavid Woodhouse     case VIRQ_TIMER:
1024c723d4c1SDavid Woodhouse     case VIRQ_DEBUG:
1025c723d4c1SDavid Woodhouse     case VIRQ_XENOPROF:
1026c723d4c1SDavid Woodhouse     case VIRQ_XENPMU:
1027c723d4c1SDavid Woodhouse         return false;
1028c723d4c1SDavid Woodhouse 
1029c723d4c1SDavid Woodhouse     default:
1030c723d4c1SDavid Woodhouse         return true;
1031c723d4c1SDavid Woodhouse     }
1032c723d4c1SDavid Woodhouse }
1033c723d4c1SDavid Woodhouse 
10346096cf78SDavid Woodhouse static int close_port(XenEvtchnState *s, evtchn_port_t port,
10356096cf78SDavid Woodhouse                       bool *flush_kvm_routes)
103683eb5811SDavid Woodhouse {
103783eb5811SDavid Woodhouse     XenEvtchnPort *p = &s->port_table[port];
103883eb5811SDavid Woodhouse 
10396096cf78SDavid Woodhouse     /* Because it *might* be a PIRQ port */
1040195801d7SStefan Hajnoczi     assert(bql_locked());
10416096cf78SDavid Woodhouse 
104283eb5811SDavid Woodhouse     switch (p->type) {
104383eb5811SDavid Woodhouse     case EVTCHNSTAT_closed:
104483eb5811SDavid Woodhouse         return -ENOENT;
104583eb5811SDavid Woodhouse 
1046aa98ee38SDavid Woodhouse     case EVTCHNSTAT_pirq:
1047be155098SDavid Woodhouse         s->pirq[p->u.pirq].port = 0;
1048be155098SDavid Woodhouse         if (s->pirq[p->u.pirq].is_translated) {
10496096cf78SDavid Woodhouse             *flush_kvm_routes = true;
10506096cf78SDavid Woodhouse         }
1051aa98ee38SDavid Woodhouse         break;
1052aa98ee38SDavid Woodhouse 
1053c723d4c1SDavid Woodhouse     case EVTCHNSTAT_virq:
1054be155098SDavid Woodhouse         kvm_xen_set_vcpu_virq(virq_is_global(p->u.virq) ? 0 : p->vcpu,
1055be155098SDavid Woodhouse                               p->u.virq, 0);
1056c723d4c1SDavid Woodhouse         break;
1057c723d4c1SDavid Woodhouse 
1058f5417856SDavid Woodhouse     case EVTCHNSTAT_ipi:
1059f5417856SDavid Woodhouse         if (s->evtchn_in_kernel) {
1060f5417856SDavid Woodhouse             deassign_kernel_port(port);
1061f5417856SDavid Woodhouse         }
1062f5417856SDavid Woodhouse         break;
1063f5417856SDavid Woodhouse 
106484327881SDavid Woodhouse     case EVTCHNSTAT_interdomain:
1065be155098SDavid Woodhouse         if (p->u.interdomain.to_qemu) {
1066be155098SDavid Woodhouse             uint16_t be_port = p->u.interdomain.port;
1067794fba23SDavid Woodhouse             struct xenevtchn_handle *xc = s->be_handles[be_port];
1068794fba23SDavid Woodhouse             if (xc) {
1069794fba23SDavid Woodhouse                 if (kvm_xen_has_cap(EVTCHN_SEND)) {
1070794fba23SDavid Woodhouse                     deassign_kernel_port(port);
1071794fba23SDavid Woodhouse                 }
1072794fba23SDavid Woodhouse                 xc->guest_port = 0;
1073794fba23SDavid Woodhouse             }
107484327881SDavid Woodhouse         } else {
107584327881SDavid Woodhouse             /* Loopback interdomain */
1076be155098SDavid Woodhouse             XenEvtchnPort *rp = &s->port_table[p->u.interdomain.port];
1077be155098SDavid Woodhouse             if (!valid_port(p->u.interdomain.port) ||
1078be155098SDavid Woodhouse                 rp->u.interdomain.port != port ||
107984327881SDavid Woodhouse                 rp->type != EVTCHNSTAT_interdomain) {
108084327881SDavid Woodhouse                 error_report("Inconsistent state for interdomain unbind");
108184327881SDavid Woodhouse             } else {
108284327881SDavid Woodhouse                 /* Set the other end back to unbound */
108384327881SDavid Woodhouse                 rp->type = EVTCHNSTAT_unbound;
1084be155098SDavid Woodhouse                 rp->u.interdomain.port = 0;
108584327881SDavid Woodhouse             }
108684327881SDavid Woodhouse         }
108784327881SDavid Woodhouse         break;
108884327881SDavid Woodhouse 
108983eb5811SDavid Woodhouse     default:
109083eb5811SDavid Woodhouse         break;
109183eb5811SDavid Woodhouse     }
109283eb5811SDavid Woodhouse 
109383eb5811SDavid Woodhouse     free_port(s, port);
109483eb5811SDavid Woodhouse     return 0;
109583eb5811SDavid Woodhouse }
109683eb5811SDavid Woodhouse 
1097a15b1097SDavid Woodhouse int xen_evtchn_soft_reset(void)
1098a15b1097SDavid Woodhouse {
1099a15b1097SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1100*95a36455SArtem Chernyshev     bool flush_kvm_routes = false;
1101a15b1097SDavid Woodhouse     int i;
1102a15b1097SDavid Woodhouse 
1103a15b1097SDavid Woodhouse     if (!s) {
1104a15b1097SDavid Woodhouse         return -ENOTSUP;
1105a15b1097SDavid Woodhouse     }
1106a15b1097SDavid Woodhouse 
1107195801d7SStefan Hajnoczi     assert(bql_locked());
1108a15b1097SDavid Woodhouse 
11096096cf78SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
1110a15b1097SDavid Woodhouse 
1111a15b1097SDavid Woodhouse     for (i = 0; i < s->nr_ports; i++) {
11126096cf78SDavid Woodhouse         close_port(s, i, &flush_kvm_routes);
11136096cf78SDavid Woodhouse     }
11146096cf78SDavid Woodhouse 
11156096cf78SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
11166096cf78SDavid Woodhouse 
11176096cf78SDavid Woodhouse     if (flush_kvm_routes) {
11186096cf78SDavid Woodhouse         kvm_update_msi_routes_all(NULL, true, 0, 0);
1119a15b1097SDavid Woodhouse     }
1120a15b1097SDavid Woodhouse 
1121a15b1097SDavid Woodhouse     return 0;
1122a15b1097SDavid Woodhouse }
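
/*
 * [Editor's annotation, not part of the upstream file] The locking pattern
 * visible here and in the callers below: whenever both locks are taken, the
 * BQL (asserted via bql_locked() or taken with BQL_LOCK_GUARD()) is the
 * outer lock and s->port_lock the inner one, and kvm_update_msi_routes_all()
 * is only ever invoked after port_lock has been dropped, since re-translating
 * PIRQ-backed MSIs can call back into this file and take port_lock again.
 */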
1123a15b1097SDavid Woodhouse 
1124a15b1097SDavid Woodhouse int xen_evtchn_reset_op(struct evtchn_reset *reset)
1125a15b1097SDavid Woodhouse {
1126a15b1097SDavid Woodhouse     if (reset->dom != DOMID_SELF && reset->dom != xen_domid) {
1127a15b1097SDavid Woodhouse         return -ESRCH;
1128a15b1097SDavid Woodhouse     }
1129a15b1097SDavid Woodhouse 
113032ead8e6SStefan Hajnoczi     BQL_LOCK_GUARD();
1131a15b1097SDavid Woodhouse     return xen_evtchn_soft_reset();
1132a15b1097SDavid Woodhouse }
1133a15b1097SDavid Woodhouse 
113483eb5811SDavid Woodhouse int xen_evtchn_close_op(struct evtchn_close *close)
113583eb5811SDavid Woodhouse {
113683eb5811SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
11376096cf78SDavid Woodhouse     bool flush_kvm_routes = false;
113883eb5811SDavid Woodhouse     int ret;
113983eb5811SDavid Woodhouse 
114083eb5811SDavid Woodhouse     if (!s) {
114183eb5811SDavid Woodhouse         return -ENOTSUP;
114283eb5811SDavid Woodhouse     }
114383eb5811SDavid Woodhouse 
114483eb5811SDavid Woodhouse     if (!valid_port(close->port)) {
114583eb5811SDavid Woodhouse         return -EINVAL;
114683eb5811SDavid Woodhouse     }
114783eb5811SDavid Woodhouse 
114832ead8e6SStefan Hajnoczi     BQL_LOCK_GUARD();
114983eb5811SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
115083eb5811SDavid Woodhouse 
11516096cf78SDavid Woodhouse     ret = close_port(s, close->port, &flush_kvm_routes);
115283eb5811SDavid Woodhouse 
115383eb5811SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
115483eb5811SDavid Woodhouse 
11556096cf78SDavid Woodhouse     if (flush_kvm_routes) {
11566096cf78SDavid Woodhouse         kvm_update_msi_routes_all(NULL, true, 0, 0);
11576096cf78SDavid Woodhouse     }
11586096cf78SDavid Woodhouse 
115983eb5811SDavid Woodhouse     return ret;
116083eb5811SDavid Woodhouse }
1161190cc3c0SDavid Woodhouse 
1162190cc3c0SDavid Woodhouse int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
1163190cc3c0SDavid Woodhouse {
1164190cc3c0SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1165190cc3c0SDavid Woodhouse     int ret;
1166190cc3c0SDavid Woodhouse 
1167190cc3c0SDavid Woodhouse     if (!s) {
1168190cc3c0SDavid Woodhouse         return -ENOTSUP;
1169190cc3c0SDavid Woodhouse     }
1170190cc3c0SDavid Woodhouse 
1171190cc3c0SDavid Woodhouse     if (!valid_port(unmask->port)) {
1172190cc3c0SDavid Woodhouse         return -EINVAL;
1173190cc3c0SDavid Woodhouse     }
1174190cc3c0SDavid Woodhouse 
1175190cc3c0SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
1176190cc3c0SDavid Woodhouse 
1177190cc3c0SDavid Woodhouse     ret = unmask_port(s, unmask->port, true);
1178190cc3c0SDavid Woodhouse 
1179190cc3c0SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
1180190cc3c0SDavid Woodhouse 
1181190cc3c0SDavid Woodhouse     return ret;
1182190cc3c0SDavid Woodhouse }
1183c723d4c1SDavid Woodhouse 
118530667046SDavid Woodhouse int xen_evtchn_bind_vcpu_op(struct evtchn_bind_vcpu *vcpu)
118530667046SDavid Woodhouse {
118630667046SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
118730667046SDavid Woodhouse     XenEvtchnPort *p;
118830667046SDavid Woodhouse     int ret = -EINVAL;
118930667046SDavid Woodhouse 
119030667046SDavid Woodhouse     if (!s) {
119130667046SDavid Woodhouse         return -ENOTSUP;
119230667046SDavid Woodhouse     }
119330667046SDavid Woodhouse 
119430667046SDavid Woodhouse     if (!valid_port(vcpu->port)) {
119530667046SDavid Woodhouse         return -EINVAL;
119630667046SDavid Woodhouse     }
119730667046SDavid Woodhouse 
119830667046SDavid Woodhouse     if (!valid_vcpu(vcpu->vcpu)) {
119930667046SDavid Woodhouse         return -ENOENT;
120030667046SDavid Woodhouse     }
120130667046SDavid Woodhouse 
120230667046SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
120330667046SDavid Woodhouse 
120430667046SDavid Woodhouse     p = &s->port_table[vcpu->port];
120530667046SDavid Woodhouse 
120630667046SDavid Woodhouse     if (p->type == EVTCHNSTAT_interdomain ||
120730667046SDavid Woodhouse         p->type == EVTCHNSTAT_unbound ||
120830667046SDavid Woodhouse         p->type == EVTCHNSTAT_pirq ||
1209be155098SDavid Woodhouse         (p->type == EVTCHNSTAT_virq && virq_is_global(p->u.virq))) {
121030667046SDavid Woodhouse         /*
121130667046SDavid Woodhouse          * unmask_port() with do_unmask==false will just raise the event
121230667046SDavid Woodhouse          * on the new vCPU if the port was already pending.
121330667046SDavid Woodhouse          */
121430667046SDavid Woodhouse         p->vcpu = vcpu->vcpu;
121530667046SDavid Woodhouse         unmask_port(s, vcpu->port, false);
121630667046SDavid Woodhouse         ret = 0;
121730667046SDavid Woodhouse     }
121830667046SDavid Woodhouse 
121930667046SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
122030667046SDavid Woodhouse 
122130667046SDavid Woodhouse     return ret;
122230667046SDavid Woodhouse }
122330667046SDavid Woodhouse 
1224c723d4c1SDavid Woodhouse int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq)
1225c723d4c1SDavid Woodhouse {
1226c723d4c1SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1227c723d4c1SDavid Woodhouse     int ret;
1228c723d4c1SDavid Woodhouse 
1229c723d4c1SDavid Woodhouse     if (!s) {
1230c723d4c1SDavid Woodhouse         return -ENOTSUP;
1231c723d4c1SDavid Woodhouse     }
1232c723d4c1SDavid Woodhouse 
1233c723d4c1SDavid Woodhouse     if (virq->virq >= NR_VIRQS) {
1234c723d4c1SDavid Woodhouse         return -EINVAL;
1235c723d4c1SDavid Woodhouse     }
1236c723d4c1SDavid Woodhouse 
1237c723d4c1SDavid Woodhouse     /* Global VIRQ must be allocated on vCPU0 first */
1238c723d4c1SDavid Woodhouse     if (virq_is_global(virq->virq) && virq->vcpu != 0) {
1239c723d4c1SDavid Woodhouse         return -EINVAL;
1240c723d4c1SDavid Woodhouse     }
1241c723d4c1SDavid Woodhouse 
1242c723d4c1SDavid Woodhouse     if (!valid_vcpu(virq->vcpu)) {
1243c723d4c1SDavid Woodhouse         return -ENOENT;
1244c723d4c1SDavid Woodhouse     }
1245c723d4c1SDavid Woodhouse 
1246c723d4c1SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
1247c723d4c1SDavid Woodhouse 
1248c723d4c1SDavid Woodhouse     ret = allocate_port(s, virq->vcpu, EVTCHNSTAT_virq, virq->virq,
1249c723d4c1SDavid Woodhouse                         &virq->port);
1250c723d4c1SDavid Woodhouse     if (!ret) {
1251c723d4c1SDavid Woodhouse         ret = kvm_xen_set_vcpu_virq(virq->vcpu, virq->virq, virq->port);
1252c723d4c1SDavid Woodhouse         if (ret) {
1253c723d4c1SDavid Woodhouse             free_port(s, virq->port);
1254c723d4c1SDavid Woodhouse         }
1255c723d4c1SDavid Woodhouse     }
1256c723d4c1SDavid Woodhouse 
1257c723d4c1SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
1258c723d4c1SDavid Woodhouse 
1259c723d4c1SDavid Woodhouse     return ret;
1260c723d4c1SDavid Woodhouse }
1261f5417856SDavid Woodhouse 
1262aa98ee38SDavid Woodhouse int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
1263aa98ee38SDavid Woodhouse {
1264aa98ee38SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1265aa98ee38SDavid Woodhouse     int ret;
1266aa98ee38SDavid Woodhouse 
1267aa98ee38SDavid Woodhouse     if (!s) {
1268aa98ee38SDavid Woodhouse         return -ENOTSUP;
1269aa98ee38SDavid Woodhouse     }
1270aa98ee38SDavid Woodhouse 
1271aa98ee38SDavid Woodhouse     if (pirq->pirq >= s->nr_pirqs) {
1272aa98ee38SDavid Woodhouse         return -EINVAL;
1273aa98ee38SDavid Woodhouse     }
1274aa98ee38SDavid Woodhouse 
127532ead8e6SStefan Hajnoczi     BQL_LOCK_GUARD();
1276aa98ee38SDavid Woodhouse 
1277aa98ee38SDavid Woodhouse     if (s->pirq[pirq->pirq].port) {
1278aa98ee38SDavid Woodhouse         return -EBUSY;
1279aa98ee38SDavid Woodhouse     }
1280aa98ee38SDavid Woodhouse 
12816096cf78SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
12826096cf78SDavid Woodhouse 
1283aa98ee38SDavid Woodhouse     ret = allocate_port(s, 0, EVTCHNSTAT_pirq, pirq->pirq,
1284aa98ee38SDavid Woodhouse                         &pirq->port);
1285aa98ee38SDavid Woodhouse     if (ret) {
12866096cf78SDavid Woodhouse         qemu_mutex_unlock(&s->port_lock);
1287aa98ee38SDavid Woodhouse         return ret;
1288aa98ee38SDavid Woodhouse     }
1289aa98ee38SDavid Woodhouse 
1290aa98ee38SDavid Woodhouse     s->pirq[pirq->pirq].port = pirq->port;
1291aa98ee38SDavid Woodhouse     trace_kvm_xen_bind_pirq(pirq->pirq, pirq->port);
1292aa98ee38SDavid Woodhouse 
12936096cf78SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
12946096cf78SDavid Woodhouse 
12956096cf78SDavid Woodhouse     /*
12966096cf78SDavid Woodhouse      * Need to do the unmask outside port_lock because it may call
12976096cf78SDavid Woodhouse      * back into the MSI translate function.
12986096cf78SDavid Woodhouse      */
12996096cf78SDavid Woodhouse     if (s->pirq[pirq->pirq].gsi == IRQ_MSI_EMU) {
13006096cf78SDavid Woodhouse         if (s->pirq[pirq->pirq].is_masked) {
13016096cf78SDavid Woodhouse             PCIDevice *dev = s->pirq[pirq->pirq].dev;
13026096cf78SDavid Woodhouse             int vector = s->pirq[pirq->pirq].vector;
13036096cf78SDavid Woodhouse             char *dev_path = qdev_get_dev_path(DEVICE(dev));
13046096cf78SDavid Woodhouse 
13056096cf78SDavid Woodhouse             trace_kvm_xen_unmask_pirq(pirq->pirq, dev_path, vector);
13066096cf78SDavid Woodhouse             g_free(dev_path);
13076096cf78SDavid Woodhouse 
13086096cf78SDavid Woodhouse             if (s->pirq[pirq->pirq].is_msix) {
13096096cf78SDavid Woodhouse                 msix_set_mask(dev, vector, false);
13106096cf78SDavid Woodhouse             } else {
13116096cf78SDavid Woodhouse                 msi_set_mask(dev, vector, false, NULL);
13126096cf78SDavid Woodhouse             }
13136096cf78SDavid Woodhouse         } else if (s->pirq[pirq->pirq].is_translated) {
13146096cf78SDavid Woodhouse             /*
13156096cf78SDavid Woodhouse              * If KVM had attempted to translate this one before, make it try
13166096cf78SDavid Woodhouse              * again. If we unmasked, then the notifier on the MSI(-X) vector
13176096cf78SDavid Woodhouse              * will already have had the same effect.
13186096cf78SDavid Woodhouse              */
13196096cf78SDavid Woodhouse             kvm_update_msi_routes_all(NULL, true, 0, 0);
13206096cf78SDavid Woodhouse         }
13216096cf78SDavid Woodhouse     }
13226096cf78SDavid Woodhouse 
1323aa98ee38SDavid Woodhouse     return ret;
1324aa98ee38SDavid Woodhouse }
1325aa98ee38SDavid Woodhouse 
1326f5417856SDavid Woodhouse int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi)
1327f5417856SDavid Woodhouse {
1328f5417856SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1329f5417856SDavid Woodhouse     int ret;
1330f5417856SDavid Woodhouse 
1331f5417856SDavid Woodhouse     if (!s) {
1332f5417856SDavid Woodhouse         return -ENOTSUP;
1333f5417856SDavid Woodhouse     }
1334f5417856SDavid Woodhouse 
1335f5417856SDavid Woodhouse     if (!valid_vcpu(ipi->vcpu)) {
1336f5417856SDavid Woodhouse         return -ENOENT;
1337f5417856SDavid Woodhouse     }
1338f5417856SDavid Woodhouse 
1339f5417856SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
1340f5417856SDavid Woodhouse 
1341f5417856SDavid Woodhouse     ret = allocate_port(s, ipi->vcpu, EVTCHNSTAT_ipi, 0, &ipi->port);
1342f5417856SDavid Woodhouse     if (!ret && s->evtchn_in_kernel) {
1343f5417856SDavid Woodhouse         assign_kernel_port(EVTCHNSTAT_ipi, ipi->port, ipi->vcpu);
1344f5417856SDavid Woodhouse     }
1345f5417856SDavid Woodhouse 
1346f5417856SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
1347f5417856SDavid Woodhouse 
1348f5417856SDavid Woodhouse     return ret;
1349f5417856SDavid Woodhouse }
1350cf7679abSDavid Woodhouse 
135184327881SDavid Woodhouse int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
135284327881SDavid Woodhouse {
135384327881SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
135484327881SDavid Woodhouse     int ret;
135584327881SDavid Woodhouse 
135684327881SDavid Woodhouse     if (!s) {
135784327881SDavid Woodhouse         return -ENOTSUP;
135884327881SDavid Woodhouse     }
135984327881SDavid Woodhouse 
1360be155098SDavid Woodhouse     if (interdomain->remote_dom != DOMID_QEMU &&
1361be155098SDavid Woodhouse         interdomain->remote_dom != DOMID_SELF &&
1362be155098SDavid Woodhouse         interdomain->remote_dom != xen_domid) {
136384327881SDavid Woodhouse         return -ESRCH;
136484327881SDavid Woodhouse     }
136584327881SDavid Woodhouse 
136684327881SDavid Woodhouse     if (!valid_port(interdomain->remote_port)) {
136784327881SDavid Woodhouse         return -EINVAL;
136884327881SDavid Woodhouse     }
136984327881SDavid Woodhouse 
137084327881SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
137184327881SDavid Woodhouse 
137284327881SDavid Woodhouse     /* The newly allocated port starts out as unbound */
1373be155098SDavid Woodhouse     ret = allocate_port(s, 0, EVTCHNSTAT_unbound, 0, &interdomain->local_port);
1374be155098SDavid Woodhouse 
137584327881SDavid Woodhouse     if (ret) {
137684327881SDavid Woodhouse         goto out;
137784327881SDavid Woodhouse     }
137884327881SDavid Woodhouse 
137984327881SDavid Woodhouse     if (interdomain->remote_dom == DOMID_QEMU) {
1380794fba23SDavid Woodhouse         struct xenevtchn_handle *xc = s->be_handles[interdomain->remote_port];
1381794fba23SDavid Woodhouse         XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
1382794fba23SDavid Woodhouse 
1383794fba23SDavid Woodhouse         if (!xc) {
1384794fba23SDavid Woodhouse             ret = -ENOENT;
1385794fba23SDavid Woodhouse             goto out_free_port;
1386794fba23SDavid Woodhouse         }
1387794fba23SDavid Woodhouse 
1388794fba23SDavid Woodhouse         if (xc->guest_port) {
1389794fba23SDavid Woodhouse             ret = -EBUSY;
1390794fba23SDavid Woodhouse             goto out_free_port;
1391794fba23SDavid Woodhouse         }
1392794fba23SDavid Woodhouse 
1393794fba23SDavid Woodhouse         assert(xc->be_port == interdomain->remote_port);
1394794fba23SDavid Woodhouse         xc->guest_port = interdomain->local_port;
1395794fba23SDavid Woodhouse         if (kvm_xen_has_cap(EVTCHN_SEND)) {
1396794fba23SDavid Woodhouse             assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd);
1397794fba23SDavid Woodhouse         }
1398794fba23SDavid Woodhouse         lp->type = EVTCHNSTAT_interdomain;
1399be155098SDavid Woodhouse         lp->u.interdomain.to_qemu = 1;
1400be155098SDavid Woodhouse         lp->u.interdomain.port = interdomain->remote_port;
1401794fba23SDavid Woodhouse         ret = 0;
140284327881SDavid Woodhouse     } else {
140384327881SDavid Woodhouse         /* Loopback */
140484327881SDavid Woodhouse         XenEvtchnPort *rp = &s->port_table[interdomain->remote_port];
140584327881SDavid Woodhouse         XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
140684327881SDavid Woodhouse 
140775a87af9SDavid Woodhouse         /*
1408be155098SDavid Woodhouse          * The 'remote' port for loopback must be an unbound port allocated
1409be155098SDavid Woodhouse          * for communication with the local domain, and must *not* be the
1410be155098SDavid Woodhouse          * port that was just allocated for the local end.
141175a87af9SDavid Woodhouse          */
141275a87af9SDavid Woodhouse         if (interdomain->local_port != interdomain->remote_port &&
1413be155098SDavid Woodhouse             rp->type == EVTCHNSTAT_unbound && !rp->u.interdomain.to_qemu) {
141475a87af9SDavid Woodhouse 
141584327881SDavid Woodhouse             rp->type = EVTCHNSTAT_interdomain;
1416be155098SDavid Woodhouse             rp->u.interdomain.port = interdomain->local_port;
141784327881SDavid Woodhouse 
141884327881SDavid Woodhouse             lp->type = EVTCHNSTAT_interdomain;
1419be155098SDavid Woodhouse             lp->u.interdomain.port = interdomain->remote_port;
142084327881SDavid Woodhouse         } else {
142184327881SDavid Woodhouse             ret = -EINVAL;
142284327881SDavid Woodhouse         }
142384327881SDavid Woodhouse     }
142484327881SDavid Woodhouse 
1425794fba23SDavid Woodhouse  out_free_port:
142684327881SDavid Woodhouse     if (ret) {
142784327881SDavid Woodhouse         free_port(s, interdomain->local_port);
142884327881SDavid Woodhouse     }
142984327881SDavid Woodhouse  out:
143084327881SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
143184327881SDavid Woodhouse 
143284327881SDavid Woodhouse     return ret;
143384327881SDavid Woodhouse 
143484327881SDavid Woodhouse }
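
/*
 * [Editor's annotation, not part of the upstream file] Example of the state
 * after a successful loopback bind: the guest first obtains an unbound port
 * R via EVTCHNOP_alloc_unbound, then calls EVTCHNOP_bind_interdomain with
 * remote_dom == DOMID_SELF and remote_port == R, receiving a new local port
 * L. Afterwards port_table[L] and port_table[R] are both
 * EVTCHNSTAT_interdomain and their u.interdomain.port fields point at each
 * other, so an evtchn_send on either port sets the pending bit of its peer
 * (see the loopback case in xen_evtchn_send_op() below).
 */
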
1435e1db61b8SDavid Woodhouse int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc)
1436e1db61b8SDavid Woodhouse {
1437e1db61b8SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1438e1db61b8SDavid Woodhouse     int ret;
1439e1db61b8SDavid Woodhouse 
1440e1db61b8SDavid Woodhouse     if (!s) {
1441e1db61b8SDavid Woodhouse         return -ENOTSUP;
1442e1db61b8SDavid Woodhouse     }
1443e1db61b8SDavid Woodhouse 
1444e1db61b8SDavid Woodhouse     if (alloc->dom != DOMID_SELF && alloc->dom != xen_domid) {
1445e1db61b8SDavid Woodhouse         return -ESRCH;
1446e1db61b8SDavid Woodhouse     }
1447e1db61b8SDavid Woodhouse 
1448be155098SDavid Woodhouse     if (alloc->remote_dom != DOMID_QEMU &&
1449be155098SDavid Woodhouse         alloc->remote_dom != DOMID_SELF &&
1450be155098SDavid Woodhouse         alloc->remote_dom != xen_domid) {
1451e1db61b8SDavid Woodhouse         return -EPERM;
1452e1db61b8SDavid Woodhouse     }
1453e1db61b8SDavid Woodhouse 
1454e1db61b8SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
1455e1db61b8SDavid Woodhouse 
1456be155098SDavid Woodhouse     ret = allocate_port(s, 0, EVTCHNSTAT_unbound, 0, &alloc->port);
1457be155098SDavid Woodhouse 
1458be155098SDavid Woodhouse     if (!ret && alloc->remote_dom == DOMID_QEMU) {
1459be155098SDavid Woodhouse         XenEvtchnPort *p = &s->port_table[alloc->port];
1460be155098SDavid Woodhouse         p->u.interdomain.to_qemu = 1;
1461be155098SDavid Woodhouse     }
1462e1db61b8SDavid Woodhouse 
1463e1db61b8SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
1464e1db61b8SDavid Woodhouse 
1465e1db61b8SDavid Woodhouse     return ret;
1466e1db61b8SDavid Woodhouse }
1467e1db61b8SDavid Woodhouse 
1468cf7679abSDavid Woodhouse int xen_evtchn_send_op(struct evtchn_send *send)
1469cf7679abSDavid Woodhouse {
1470cf7679abSDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1471cf7679abSDavid Woodhouse     XenEvtchnPort *p;
1472cf7679abSDavid Woodhouse     int ret = 0;
1473cf7679abSDavid Woodhouse 
1474cf7679abSDavid Woodhouse     if (!s) {
1475cf7679abSDavid Woodhouse         return -ENOTSUP;
1476cf7679abSDavid Woodhouse     }
1477cf7679abSDavid Woodhouse 
1478cf7679abSDavid Woodhouse     if (!valid_port(send->port)) {
1479cf7679abSDavid Woodhouse         return -EINVAL;
1480cf7679abSDavid Woodhouse     }
1481cf7679abSDavid Woodhouse 
1482cf7679abSDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
1483cf7679abSDavid Woodhouse 
1484cf7679abSDavid Woodhouse     p = &s->port_table[send->port];
1485cf7679abSDavid Woodhouse 
1486cf7679abSDavid Woodhouse     switch (p->type) {
1487cf7679abSDavid Woodhouse     case EVTCHNSTAT_interdomain:
1488be155098SDavid Woodhouse         if (p->u.interdomain.to_qemu) {
1489cf7679abSDavid Woodhouse             /*
1490cf7679abSDavid Woodhouse              * This is an event from the guest to qemu itself, which is
1491794fba23SDavid Woodhouse              * serving as the driver domain.
1492cf7679abSDavid Woodhouse              */
1493be155098SDavid Woodhouse             uint16_t be_port = p->u.interdomain.port;
1494794fba23SDavid Woodhouse             struct xenevtchn_handle *xc = s->be_handles[be_port];
1495794fba23SDavid Woodhouse             if (xc) {
1496794fba23SDavid Woodhouse                 eventfd_write(xc->fd, 1);
1497794fba23SDavid Woodhouse                 ret = 0;
1498794fba23SDavid Woodhouse             } else {
1499794fba23SDavid Woodhouse                 ret = -ENOENT;
1500794fba23SDavid Woodhouse             }
1501cf7679abSDavid Woodhouse         } else {
1502cf7679abSDavid Woodhouse             /* Loopback interdomain ports; just a complex IPI */
1503be155098SDavid Woodhouse             set_port_pending(s, p->u.interdomain.port);
1504cf7679abSDavid Woodhouse         }
1505cf7679abSDavid Woodhouse         break;
1506cf7679abSDavid Woodhouse 
1507cf7679abSDavid Woodhouse     case EVTCHNSTAT_ipi:
1508cf7679abSDavid Woodhouse         set_port_pending(s, send->port);
1509cf7679abSDavid Woodhouse         break;
1510cf7679abSDavid Woodhouse 
1511cf7679abSDavid Woodhouse     case EVTCHNSTAT_unbound:
1512cf7679abSDavid Woodhouse         /* Xen will silently drop these */
1513cf7679abSDavid Woodhouse         break;
1514cf7679abSDavid Woodhouse 
1515cf7679abSDavid Woodhouse     default:
1516cf7679abSDavid Woodhouse         ret = -EINVAL;
1517cf7679abSDavid Woodhouse         break;
1518cf7679abSDavid Woodhouse     }
1519cf7679abSDavid Woodhouse 
1520cf7679abSDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
1521cf7679abSDavid Woodhouse 
1522cf7679abSDavid Woodhouse     return ret;
1523cf7679abSDavid Woodhouse }
1524cf7679abSDavid Woodhouse 
1525b746a779SJoao Martins int xen_evtchn_set_port(uint16_t port)
1526b746a779SJoao Martins {
1527b746a779SJoao Martins     XenEvtchnState *s = xen_evtchn_singleton;
1528b746a779SJoao Martins     XenEvtchnPort *p;
1529b746a779SJoao Martins     int ret = -EINVAL;
1530b746a779SJoao Martins 
1531b746a779SJoao Martins     if (!s) {
1532b746a779SJoao Martins         return -ENOTSUP;
1533b746a779SJoao Martins     }
1534b746a779SJoao Martins 
1535b746a779SJoao Martins     if (!valid_port(port)) {
1536b746a779SJoao Martins         return -EINVAL;
1537b746a779SJoao Martins     }
1538b746a779SJoao Martins 
1539b746a779SJoao Martins     qemu_mutex_lock(&s->port_lock);
1540b746a779SJoao Martins 
1541b746a779SJoao Martins     p = &s->port_table[port];
1542b746a779SJoao Martins 
1543b746a779SJoao Martins     /* QEMU has no business sending to anything but these */
1544b746a779SJoao Martins     if (p->type == EVTCHNSTAT_virq ||
1545be155098SDavid Woodhouse         (p->type == EVTCHNSTAT_interdomain && p->u.interdomain.to_qemu)) {
1546b746a779SJoao Martins         set_port_pending(s, port);
1547b746a779SJoao Martins         ret = 0;
1548b746a779SJoao Martins     }
1549b746a779SJoao Martins 
1550b746a779SJoao Martins     qemu_mutex_unlock(&s->port_lock);
1551b746a779SJoao Martins 
1552b746a779SJoao Martins     return ret;
1553b746a779SJoao Martins }
1554b746a779SJoao Martins 
1555aa98ee38SDavid Woodhouse static int allocate_pirq(XenEvtchnState *s, int type, int gsi)
1556aa98ee38SDavid Woodhouse {
1557aa98ee38SDavid Woodhouse     uint16_t pirq;
1558aa98ee38SDavid Woodhouse 
1559aa98ee38SDavid Woodhouse     /*
1560aa98ee38SDavid Woodhouse      * Preserve the allocation strategy that Xen has. It looks like
1561aa98ee38SDavid Woodhouse      * we *never* give out PIRQ 0-15, we give out 16-nr_irqs_gsi only
1562aa98ee38SDavid Woodhouse      * to GSIs (counting up from 16), and then we count backwards from
1563aa98ee38SDavid Woodhouse      * the top for MSIs or when the GSI space is exhausted.
1564aa98ee38SDavid Woodhouse      */
1565aa98ee38SDavid Woodhouse     if (type == MAP_PIRQ_TYPE_GSI) {
1566aa98ee38SDavid Woodhouse         for (pirq = 16 ; pirq < IOAPIC_NUM_PINS; pirq++) {
1567aa98ee38SDavid Woodhouse             if (pirq_inuse(s, pirq)) {
1568aa98ee38SDavid Woodhouse                 continue;
1569aa98ee38SDavid Woodhouse             }
1570aa98ee38SDavid Woodhouse 
1571aa98ee38SDavid Woodhouse             /* Found it */
1572aa98ee38SDavid Woodhouse             goto found;
1573aa98ee38SDavid Woodhouse         }
1574aa98ee38SDavid Woodhouse     }
1575aa98ee38SDavid Woodhouse     for (pirq = s->nr_pirqs - 1; pirq >= IOAPIC_NUM_PINS; pirq--) {
1576aa98ee38SDavid Woodhouse         /* Skip whole words at a time when they're full */
1577aa98ee38SDavid Woodhouse         if (pirq_inuse_word(s, pirq) == UINT64_MAX) {
1578aa98ee38SDavid Woodhouse             pirq &= ~63ULL;
1579aa98ee38SDavid Woodhouse             continue;
1580aa98ee38SDavid Woodhouse         }
1581aa98ee38SDavid Woodhouse         if (pirq_inuse(s, pirq)) {
1582aa98ee38SDavid Woodhouse             continue;
1583aa98ee38SDavid Woodhouse         }
1584aa98ee38SDavid Woodhouse 
1585aa98ee38SDavid Woodhouse         goto found;
1586aa98ee38SDavid Woodhouse     }
1587aa98ee38SDavid Woodhouse     return -ENOSPC;
1588aa98ee38SDavid Woodhouse 
1589aa98ee38SDavid Woodhouse  found:
1590aa98ee38SDavid Woodhouse     pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
1591aa98ee38SDavid Woodhouse     if (gsi >= 0) {
1592cf885b19SDavid Woodhouse         assert(gsi < IOAPIC_NUM_PINS);
1593aa98ee38SDavid Woodhouse         s->gsi_pirq[gsi] = pirq;
1594aa98ee38SDavid Woodhouse     }
1595aa98ee38SDavid Woodhouse     s->pirq[pirq].gsi = gsi;
1596aa98ee38SDavid Woodhouse     return pirq;
1597aa98ee38SDavid Woodhouse }
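
/*
 * [Editor's annotation, not part of the upstream file] Worked example of the
 * allocation strategy above, assuming pirq_inuse_word()/pirq_inuse_bit() are
 * the usual 64-bit bitmap helpers (word index pirq / 64, bit pirq % 64): the
 * first MAP_PIRQ_TYPE_GSI request gets PIRQ 16, the next 17, and so on up to
 * IOAPIC_NUM_PINS - 1, while MSI requests (and GSI requests once that range
 * is exhausted) count down from s->nr_pirqs - 1. The "pirq &= ~63ULL" step
 * skips a fully used word in one go: if PIRQs 192..255 are all in use,
 * hitting pirq == 255 rounds down to 192 and the loop's pirq-- resumes the
 * search at 191.
 */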
1598aa98ee38SDavid Woodhouse 
15994f81baa3SDavid Woodhouse bool xen_evtchn_set_gsi(int gsi, int level)
16004f81baa3SDavid Woodhouse {
16014f81baa3SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
16024f81baa3SDavid Woodhouse     int pirq;
16034f81baa3SDavid Woodhouse 
1604195801d7SStefan Hajnoczi     assert(bql_locked());
16054f81baa3SDavid Woodhouse 
1606cf885b19SDavid Woodhouse     if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
16074f81baa3SDavid Woodhouse         return false;
16084f81baa3SDavid Woodhouse     }
16094f81baa3SDavid Woodhouse 
16104f81baa3SDavid Woodhouse     /*
16114f81baa3SDavid Woodhouse      * Check that it *isn't* the event channel GSI, and thus
16124f81baa3SDavid Woodhouse      * that we are not recursing and it's safe to take s->port_lock.
16134f81baa3SDavid Woodhouse      *
16144f81baa3SDavid Woodhouse      * Locking aside, it's perfectly sane to bail out early for that
16154f81baa3SDavid Woodhouse      * special case, as it would make no sense for the event channel
16164f81baa3SDavid Woodhouse      * GSI to be routed back to event channels, when the delivery
16174f81baa3SDavid Woodhouse      * method is to raise the GSI... that recursion wouldn't *just*
16184f81baa3SDavid Woodhouse      * be a locking issue.
16194f81baa3SDavid Woodhouse      */
16204f81baa3SDavid Woodhouse     if (gsi && gsi == s->callback_gsi) {
16214f81baa3SDavid Woodhouse         return false;
16224f81baa3SDavid Woodhouse     }
16234f81baa3SDavid Woodhouse 
16244f81baa3SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
16254f81baa3SDavid Woodhouse 
16264f81baa3SDavid Woodhouse     pirq = s->gsi_pirq[gsi];
16274f81baa3SDavid Woodhouse     if (!pirq) {
16284f81baa3SDavid Woodhouse         return false;
16294f81baa3SDavid Woodhouse     }
16304f81baa3SDavid Woodhouse 
16314f81baa3SDavid Woodhouse     if (level) {
16324f81baa3SDavid Woodhouse         int port = s->pirq[pirq].port;
16334f81baa3SDavid Woodhouse 
16344f81baa3SDavid Woodhouse         s->pirq_gsi_set |= (1U << gsi);
16354f81baa3SDavid Woodhouse         if (port) {
16364f81baa3SDavid Woodhouse             set_port_pending(s, port);
16374f81baa3SDavid Woodhouse         }
16384f81baa3SDavid Woodhouse     } else {
16394f81baa3SDavid Woodhouse         s->pirq_gsi_set &= ~(1U << gsi);
16404f81baa3SDavid Woodhouse     }
16414f81baa3SDavid Woodhouse     return true;
16424f81baa3SDavid Woodhouse }
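
/*
 * [Editor's annotation, not part of the upstream file] s->pirq_gsi_set
 * latches the current level of each GSI line. Event channels themselves are
 * edge-like, so level-triggered interrupts are emulated in two halves: the
 * assertion above sets the bit and raises the bound port once, and
 * xen_physdev_eoi_pirq() below re-raises the port at EOI time if the bit is
 * still set, i.e. if the line is still asserted.
 */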
16434f81baa3SDavid Woodhouse 
16446096cf78SDavid Woodhouse static uint32_t msi_pirq_target(uint64_t addr, uint32_t data)
16456096cf78SDavid Woodhouse {
16466096cf78SDavid Woodhouse     /* The vector (in low 8 bits of data) must be zero */
16476096cf78SDavid Woodhouse     if (data & 0xff) {
16486096cf78SDavid Woodhouse         return 0;
16496096cf78SDavid Woodhouse     }
16506096cf78SDavid Woodhouse 
16516096cf78SDavid Woodhouse     uint32_t pirq = (addr & 0xff000) >> 12;
16526096cf78SDavid Woodhouse     pirq |= (addr >> 32) & 0xffffff00;
16536096cf78SDavid Woodhouse 
16546096cf78SDavid Woodhouse     return pirq;
16556096cf78SDavid Woodhouse }
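
/*
 * [Editor's annotation, not part of the upstream file] Worked example of the
 * PIRQ encoding recovered above, which presumably mirrors how the guest
 * programs an MSI that it has mapped onto a PIRQ: the vector byte of the MSI
 * data must be zero, address bits 12..19 carry PIRQ bits 0..7, and address
 * bits 40..63 carry PIRQ bits 8..31. So addr = 0xfee2a000 with data = 0
 * decodes to PIRQ (0x2a000 & 0xff000) >> 12 = 42, while any non-zero vector
 * byte makes the function return 0 ("not a PIRQ") and the message is treated
 * as a normal MSI.
 */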
16566096cf78SDavid Woodhouse 
16576096cf78SDavid Woodhouse static void do_remove_pci_vector(XenEvtchnState *s, PCIDevice *dev, int vector,
16586096cf78SDavid Woodhouse                                  int except_pirq)
16596096cf78SDavid Woodhouse {
16606096cf78SDavid Woodhouse     uint32_t pirq;
16616096cf78SDavid Woodhouse 
16626096cf78SDavid Woodhouse     for (pirq = 0; pirq < s->nr_pirqs; pirq++) {
16636096cf78SDavid Woodhouse         /*
16646096cf78SDavid Woodhouse          * We could be cleverer here, but it isn't really a fast path, and
16656096cf78SDavid Woodhouse          * this trivial optimisation is enough to let us skip the big gap
16666096cf78SDavid Woodhouse          * in the middle a bit quicker (in terms of both loop iterations,
16676096cf78SDavid Woodhouse          * and cache lines).
16686096cf78SDavid Woodhouse          */
16696096cf78SDavid Woodhouse         if (!(pirq & 63) && !(pirq_inuse_word(s, pirq))) {
16706096cf78SDavid Woodhouse             pirq += 64;
16716096cf78SDavid Woodhouse             continue;
16726096cf78SDavid Woodhouse         }
16736096cf78SDavid Woodhouse         if (except_pirq && pirq == except_pirq) {
16746096cf78SDavid Woodhouse             continue;
16756096cf78SDavid Woodhouse         }
16766096cf78SDavid Woodhouse         if (s->pirq[pirq].dev != dev) {
16776096cf78SDavid Woodhouse             continue;
16786096cf78SDavid Woodhouse         }
16796096cf78SDavid Woodhouse         if (vector != -1 && s->pirq[pirq].vector != vector) {
16806096cf78SDavid Woodhouse             continue;
16816096cf78SDavid Woodhouse         }
16826096cf78SDavid Woodhouse 
16836096cf78SDavid Woodhouse         /* It could theoretically be bound to a port already, but that is OK. */
16846096cf78SDavid Woodhouse         s->pirq[pirq].dev = dev;
16856096cf78SDavid Woodhouse         s->pirq[pirq].gsi = IRQ_UNBOUND;
16866096cf78SDavid Woodhouse         s->pirq[pirq].is_msix = false;
16876096cf78SDavid Woodhouse         s->pirq[pirq].vector = 0;
16886096cf78SDavid Woodhouse         s->pirq[pirq].is_masked = false;
16896096cf78SDavid Woodhouse         s->pirq[pirq].is_translated = false;
16906096cf78SDavid Woodhouse     }
16916096cf78SDavid Woodhouse }
16926096cf78SDavid Woodhouse 
16936096cf78SDavid Woodhouse void xen_evtchn_remove_pci_device(PCIDevice *dev)
16946096cf78SDavid Woodhouse {
16956096cf78SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
16966096cf78SDavid Woodhouse 
16976096cf78SDavid Woodhouse     if (!s) {
16986096cf78SDavid Woodhouse         return;
16996096cf78SDavid Woodhouse     }
17006096cf78SDavid Woodhouse 
17016096cf78SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
17026096cf78SDavid Woodhouse     do_remove_pci_vector(s, dev, -1, 0);
17036096cf78SDavid Woodhouse }
17046096cf78SDavid Woodhouse 
17056096cf78SDavid Woodhouse void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
17066096cf78SDavid Woodhouse                           uint64_t addr, uint32_t data, bool is_masked)
17076096cf78SDavid Woodhouse {
17086096cf78SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
17096096cf78SDavid Woodhouse     uint32_t pirq;
17106096cf78SDavid Woodhouse 
17116096cf78SDavid Woodhouse     if (!s) {
17126096cf78SDavid Woodhouse         return;
17136096cf78SDavid Woodhouse     }
17146096cf78SDavid Woodhouse 
1715195801d7SStefan Hajnoczi     assert(bql_locked());
17166096cf78SDavid Woodhouse 
17176096cf78SDavid Woodhouse     pirq = msi_pirq_target(addr, data);
17186096cf78SDavid Woodhouse 
17196096cf78SDavid Woodhouse     /*
17206096cf78SDavid Woodhouse      * The PIRQ# must be sane, and there must be an allocated PIRQ in
17216096cf78SDavid Woodhouse      * IRQ_UNBOUND or IRQ_MSI_EMU state to match it.
17226096cf78SDavid Woodhouse      */
17236096cf78SDavid Woodhouse     if (!pirq || pirq >= s->nr_pirqs || !pirq_inuse(s, pirq) ||
17246096cf78SDavid Woodhouse         (s->pirq[pirq].gsi != IRQ_UNBOUND &&
17256096cf78SDavid Woodhouse          s->pirq[pirq].gsi != IRQ_MSI_EMU)) {
17266096cf78SDavid Woodhouse         pirq = 0;
17276096cf78SDavid Woodhouse     }
17286096cf78SDavid Woodhouse 
17296096cf78SDavid Woodhouse     if (pirq) {
17306096cf78SDavid Woodhouse         s->pirq[pirq].dev = dev;
17316096cf78SDavid Woodhouse         s->pirq[pirq].gsi = IRQ_MSI_EMU;
17326096cf78SDavid Woodhouse         s->pirq[pirq].is_msix = is_msix;
17336096cf78SDavid Woodhouse         s->pirq[pirq].vector = vector;
17346096cf78SDavid Woodhouse         s->pirq[pirq].is_masked = is_masked;
17356096cf78SDavid Woodhouse     }
17366096cf78SDavid Woodhouse 
17376096cf78SDavid Woodhouse     /* Remove any (other) entries for this {device, vector} */
17386096cf78SDavid Woodhouse     do_remove_pci_vector(s, dev, vector, pirq);
17396096cf78SDavid Woodhouse }
17406096cf78SDavid Woodhouse 
17416096cf78SDavid Woodhouse int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
17426096cf78SDavid Woodhouse                                   uint64_t address, uint32_t data)
17436096cf78SDavid Woodhouse {
17446096cf78SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
17456096cf78SDavid Woodhouse     uint32_t pirq, port;
17466096cf78SDavid Woodhouse     CPUState *cpu;
17476096cf78SDavid Woodhouse 
17486096cf78SDavid Woodhouse     if (!s) {
17496096cf78SDavid Woodhouse         return 1; /* Not a PIRQ */
17506096cf78SDavid Woodhouse     }
17516096cf78SDavid Woodhouse 
1752195801d7SStefan Hajnoczi     assert(bql_locked());
17536096cf78SDavid Woodhouse 
17546096cf78SDavid Woodhouse     pirq = msi_pirq_target(address, data);
17556096cf78SDavid Woodhouse     if (!pirq || pirq >= s->nr_pirqs) {
17566096cf78SDavid Woodhouse         return 1; /* Not a PIRQ */
17576096cf78SDavid Woodhouse     }
17586096cf78SDavid Woodhouse 
17596096cf78SDavid Woodhouse     if (!kvm_xen_has_cap(EVTCHN_2LEVEL)) {
17606096cf78SDavid Woodhouse         return -ENOTSUP;
17616096cf78SDavid Woodhouse     }
17626096cf78SDavid Woodhouse 
17636096cf78SDavid Woodhouse     if (s->pirq[pirq].gsi != IRQ_MSI_EMU) {
17646096cf78SDavid Woodhouse         return -EINVAL;
17656096cf78SDavid Woodhouse     }
17666096cf78SDavid Woodhouse 
17676096cf78SDavid Woodhouse     /* Remember that KVM tried to translate this. It might need to try again. */
17686096cf78SDavid Woodhouse     s->pirq[pirq].is_translated = true;
17696096cf78SDavid Woodhouse 
17706096cf78SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
17716096cf78SDavid Woodhouse 
17726096cf78SDavid Woodhouse     port = s->pirq[pirq].port;
17736096cf78SDavid Woodhouse     if (!valid_port(port)) {
17746096cf78SDavid Woodhouse         return -EINVAL;
17756096cf78SDavid Woodhouse     }
17766096cf78SDavid Woodhouse 
17776096cf78SDavid Woodhouse     cpu = qemu_get_cpu(s->port_table[port].vcpu);
17786096cf78SDavid Woodhouse     if (!cpu) {
17796096cf78SDavid Woodhouse         return -EINVAL;
17806096cf78SDavid Woodhouse     }
17816096cf78SDavid Woodhouse 
17826096cf78SDavid Woodhouse     route->type = KVM_IRQ_ROUTING_XEN_EVTCHN;
17836096cf78SDavid Woodhouse     route->u.xen_evtchn.port = port;
17846096cf78SDavid Woodhouse     route->u.xen_evtchn.vcpu = kvm_arch_vcpu_id(cpu);
17856096cf78SDavid Woodhouse     route->u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
17866096cf78SDavid Woodhouse 
17876096cf78SDavid Woodhouse     return 0; /* Handled */
17886096cf78SDavid Woodhouse }
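
/*
 * [Editor's annotation, not part of the upstream file] Return convention of
 * the helper above, presumably consumed by the KVM MSI routing fix-up path:
 * 1 means "not a PIRQ-encoded MSI, route it normally", 0 means the route
 * entry has been rewritten as KVM_IRQ_ROUTING_XEN_EVTCHN and will be
 * delivered as an event channel, and a negative errno (e.g. when the PIRQ is
 * not yet bound to a port or the vCPU is unknown) tells the caller the
 * translation failed for now; the is_translated flag set above lets a later
 * unmask or bind retry it via kvm_update_msi_routes_all().
 */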
17896096cf78SDavid Woodhouse 
17906096cf78SDavid Woodhouse bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
17916096cf78SDavid Woodhouse {
17926096cf78SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
17936096cf78SDavid Woodhouse     uint32_t pirq, port;
17946096cf78SDavid Woodhouse 
17956096cf78SDavid Woodhouse     if (!s) {
17966096cf78SDavid Woodhouse         return false;
17976096cf78SDavid Woodhouse     }
17986096cf78SDavid Woodhouse 
1799195801d7SStefan Hajnoczi     assert(bql_locked());
18006096cf78SDavid Woodhouse 
18016096cf78SDavid Woodhouse     pirq = msi_pirq_target(address, data);
18026096cf78SDavid Woodhouse     if (!pirq || pirq >= s->nr_pirqs) {
18036096cf78SDavid Woodhouse         return false;
18046096cf78SDavid Woodhouse     }
18056096cf78SDavid Woodhouse 
18066096cf78SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
18076096cf78SDavid Woodhouse 
18086096cf78SDavid Woodhouse     port = s->pirq[pirq].port;
18096096cf78SDavid Woodhouse     if (!valid_port(port)) {
18106096cf78SDavid Woodhouse         return false;
18116096cf78SDavid Woodhouse     }
18126096cf78SDavid Woodhouse 
18136096cf78SDavid Woodhouse     set_port_pending(s, port);
18146096cf78SDavid Woodhouse     return true;
18156096cf78SDavid Woodhouse }
18166096cf78SDavid Woodhouse 
1817799c2354SDavid Woodhouse int xen_physdev_map_pirq(struct physdev_map_pirq *map)
1818799c2354SDavid Woodhouse {
1819aa98ee38SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1820aa98ee38SDavid Woodhouse     int pirq = map->pirq;
1821aa98ee38SDavid Woodhouse     int gsi = map->index;
1822aa98ee38SDavid Woodhouse 
1823aa98ee38SDavid Woodhouse     if (!s) {
1824799c2354SDavid Woodhouse         return -ENOTSUP;
1825799c2354SDavid Woodhouse     }
1826799c2354SDavid Woodhouse 
182732ead8e6SStefan Hajnoczi     BQL_LOCK_GUARD();
1828aa98ee38SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
1829aa98ee38SDavid Woodhouse 
1830aa98ee38SDavid Woodhouse     if (map->domid != DOMID_SELF && map->domid != xen_domid) {
1831aa98ee38SDavid Woodhouse         return -EPERM;
1832aa98ee38SDavid Woodhouse     }
1833aa98ee38SDavid Woodhouse     if (map->type != MAP_PIRQ_TYPE_GSI) {
1834aa98ee38SDavid Woodhouse         return -EINVAL;
1835aa98ee38SDavid Woodhouse     }
1836aa98ee38SDavid Woodhouse     if (gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
1837aa98ee38SDavid Woodhouse         return -EINVAL;
1838aa98ee38SDavid Woodhouse     }
1839aa98ee38SDavid Woodhouse 
1840aa98ee38SDavid Woodhouse     if (pirq < 0) {
1841aa98ee38SDavid Woodhouse         pirq = allocate_pirq(s, map->type, gsi);
1842aa98ee38SDavid Woodhouse         if (pirq < 0) {
1843aa98ee38SDavid Woodhouse             return pirq;
1844aa98ee38SDavid Woodhouse         }
1845aa98ee38SDavid Woodhouse         map->pirq = pirq;
1846aa98ee38SDavid Woodhouse     } else if (pirq > s->nr_pirqs) {
1847aa98ee38SDavid Woodhouse         return -EINVAL;
1848aa98ee38SDavid Woodhouse     } else {
1849aa98ee38SDavid Woodhouse         /*
1850aa98ee38SDavid Woodhouse          * User specified a valid-looking PIRQ#. Allow it if it is
1851aa98ee38SDavid Woodhouse          * allocated and not yet bound, or if it is unallocated
1852aa98ee38SDavid Woodhouse          */
1853aa98ee38SDavid Woodhouse         if (pirq_inuse(s, pirq)) {
1854aa98ee38SDavid Woodhouse             if (s->pirq[pirq].gsi != IRQ_UNBOUND) {
1855aa98ee38SDavid Woodhouse                 return -EBUSY;
1856aa98ee38SDavid Woodhouse             }
1857aa98ee38SDavid Woodhouse         } else {
1858aa98ee38SDavid Woodhouse             /* If it was unused, mark it used now. */
1859aa98ee38SDavid Woodhouse             pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
1860aa98ee38SDavid Woodhouse         }
1861aa98ee38SDavid Woodhouse         /* Set the mapping in both directions. */
1862aa98ee38SDavid Woodhouse         s->pirq[pirq].gsi = gsi;
1863aa98ee38SDavid Woodhouse         s->gsi_pirq[gsi] = pirq;
1864aa98ee38SDavid Woodhouse     }
1865aa98ee38SDavid Woodhouse 
1866aa98ee38SDavid Woodhouse     trace_kvm_xen_map_pirq(pirq, gsi);
1867aa98ee38SDavid Woodhouse     return 0;
1868aa98ee38SDavid Woodhouse }
1869aa98ee38SDavid Woodhouse 
1870799c2354SDavid Woodhouse int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
1871799c2354SDavid Woodhouse {
1872aa98ee38SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1873aa98ee38SDavid Woodhouse     int pirq = unmap->pirq;
1874aa98ee38SDavid Woodhouse     int gsi;
1875aa98ee38SDavid Woodhouse 
1876aa98ee38SDavid Woodhouse     if (!s) {
1877799c2354SDavid Woodhouse         return -ENOTSUP;
1878799c2354SDavid Woodhouse     }
1879799c2354SDavid Woodhouse 
1880aa98ee38SDavid Woodhouse     if (unmap->domid != DOMID_SELF && unmap->domid != xen_domid) {
1881aa98ee38SDavid Woodhouse         return -EPERM;
1882aa98ee38SDavid Woodhouse     }
1883aa98ee38SDavid Woodhouse     if (pirq < 0 || pirq >= s->nr_pirqs) {
1884aa98ee38SDavid Woodhouse         return -EINVAL;
1885aa98ee38SDavid Woodhouse     }
1886aa98ee38SDavid Woodhouse 
188732ead8e6SStefan Hajnoczi     BQL_LOCK_GUARD();
18886096cf78SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
1889aa98ee38SDavid Woodhouse 
1890aa98ee38SDavid Woodhouse     if (!pirq_inuse(s, pirq)) {
18916096cf78SDavid Woodhouse         qemu_mutex_unlock(&s->port_lock);
1892aa98ee38SDavid Woodhouse         return -ENOENT;
1893aa98ee38SDavid Woodhouse     }
1894aa98ee38SDavid Woodhouse 
1895aa98ee38SDavid Woodhouse     gsi = s->pirq[pirq].gsi;
1896aa98ee38SDavid Woodhouse 
1897aa98ee38SDavid Woodhouse     /* We can only unmap GSI PIRQs */
1898aa98ee38SDavid Woodhouse     if (gsi < 0) {
18996096cf78SDavid Woodhouse         qemu_mutex_unlock(&s->port_lock);
1900aa98ee38SDavid Woodhouse         return -EINVAL;
1901aa98ee38SDavid Woodhouse     }
1902aa98ee38SDavid Woodhouse 
1903aa98ee38SDavid Woodhouse     s->gsi_pirq[gsi] = 0;
1904aa98ee38SDavid Woodhouse     s->pirq[pirq].gsi = IRQ_UNBOUND; /* Doesn't actually matter because: */
1905aa98ee38SDavid Woodhouse     pirq_inuse_word(s, pirq) &= ~pirq_inuse_bit(pirq);
1906aa98ee38SDavid Woodhouse 
1907aa98ee38SDavid Woodhouse     trace_kvm_xen_unmap_pirq(pirq, gsi);
19086096cf78SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
19096096cf78SDavid Woodhouse 
19106096cf78SDavid Woodhouse     if (gsi == IRQ_MSI_EMU) {
19116096cf78SDavid Woodhouse         kvm_update_msi_routes_all(NULL, true, 0, 0);
19126096cf78SDavid Woodhouse     }
19136096cf78SDavid Woodhouse 
1914aa98ee38SDavid Woodhouse     return 0;
1915aa98ee38SDavid Woodhouse }
1916aa98ee38SDavid Woodhouse 
1917799c2354SDavid Woodhouse int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
1918799c2354SDavid Woodhouse {
1919aa98ee38SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1920aa98ee38SDavid Woodhouse     int pirq = eoi->irq;
1921aa98ee38SDavid Woodhouse     int gsi;
1922aa98ee38SDavid Woodhouse 
1923aa98ee38SDavid Woodhouse     if (!s) {
1924799c2354SDavid Woodhouse         return -ENOTSUP;
1925799c2354SDavid Woodhouse     }
1926799c2354SDavid Woodhouse 
192732ead8e6SStefan Hajnoczi     BQL_LOCK_GUARD();
1928aa98ee38SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
1929aa98ee38SDavid Woodhouse 
1930aa98ee38SDavid Woodhouse     if (!pirq_inuse(s, pirq)) {
1931aa98ee38SDavid Woodhouse         return -ENOENT;
1932aa98ee38SDavid Woodhouse     }
1933aa98ee38SDavid Woodhouse 
1934aa98ee38SDavid Woodhouse     gsi = s->pirq[pirq].gsi;
1935aa98ee38SDavid Woodhouse     if (gsi < 0) {
1936aa98ee38SDavid Woodhouse         return -EINVAL;
1937aa98ee38SDavid Woodhouse     }
1938aa98ee38SDavid Woodhouse 
19394f81baa3SDavid Woodhouse     /* Reassert a level IRQ if needed */
19404f81baa3SDavid Woodhouse     if (s->pirq_gsi_set & (1U << gsi)) {
19414f81baa3SDavid Woodhouse         int port = s->pirq[pirq].port;
19424f81baa3SDavid Woodhouse         if (port) {
19434f81baa3SDavid Woodhouse             set_port_pending(s, port);
19444f81baa3SDavid Woodhouse         }
19454f81baa3SDavid Woodhouse     }
19464f81baa3SDavid Woodhouse 
1947aa98ee38SDavid Woodhouse     return 0;
1948aa98ee38SDavid Woodhouse }
1949aa98ee38SDavid Woodhouse 
1950799c2354SDavid Woodhouse int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
1951799c2354SDavid Woodhouse {
1952aa98ee38SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1953aa98ee38SDavid Woodhouse     int pirq = query->irq;
1954aa98ee38SDavid Woodhouse 
1955aa98ee38SDavid Woodhouse     if (!s) {
1956799c2354SDavid Woodhouse         return -ENOTSUP;
1957799c2354SDavid Woodhouse     }
1958799c2354SDavid Woodhouse 
195932ead8e6SStefan Hajnoczi     BQL_LOCK_GUARD();
1960aa98ee38SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
1961aa98ee38SDavid Woodhouse 
1962aa98ee38SDavid Woodhouse     if (!pirq_inuse(s, pirq)) {
1963aa98ee38SDavid Woodhouse         return -ENOENT;
1964aa98ee38SDavid Woodhouse     }
1965aa98ee38SDavid Woodhouse 
1966aa98ee38SDavid Woodhouse     if (s->pirq[pirq].gsi >= 0) {
1967aa98ee38SDavid Woodhouse         query->flags = XENIRQSTAT_needs_eoi;
1968aa98ee38SDavid Woodhouse     } else {
1969aa98ee38SDavid Woodhouse         query->flags = 0;
1970aa98ee38SDavid Woodhouse     }
1971aa98ee38SDavid Woodhouse 
1972aa98ee38SDavid Woodhouse     return 0;
1973aa98ee38SDavid Woodhouse }
1974aa98ee38SDavid Woodhouse 
1975799c2354SDavid Woodhouse int xen_physdev_get_free_pirq(struct physdev_get_free_pirq *get)
1976799c2354SDavid Woodhouse {
1977aa98ee38SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
1978aa98ee38SDavid Woodhouse     int pirq;
1979aa98ee38SDavid Woodhouse 
1980aa98ee38SDavid Woodhouse     if (!s) {
1981799c2354SDavid Woodhouse         return -ENOTSUP;
1982799c2354SDavid Woodhouse     }
1983799c2354SDavid Woodhouse 
1984aa98ee38SDavid Woodhouse     QEMU_LOCK_GUARD(&s->port_lock);
1985aa98ee38SDavid Woodhouse 
1986aa98ee38SDavid Woodhouse     pirq = allocate_pirq(s, get->type, IRQ_UNBOUND);
1987aa98ee38SDavid Woodhouse     if (pirq < 0) {
1988aa98ee38SDavid Woodhouse         return pirq;
1989aa98ee38SDavid Woodhouse     }
1990aa98ee38SDavid Woodhouse 
1991aa98ee38SDavid Woodhouse     get->pirq = pirq;
1992aa98ee38SDavid Woodhouse     trace_kvm_xen_get_free_pirq(pirq, get->type);
1993aa98ee38SDavid Woodhouse     return 0;
1994aa98ee38SDavid Woodhouse }
1995aa98ee38SDavid Woodhouse 
1996794fba23SDavid Woodhouse struct xenevtchn_handle *xen_be_evtchn_open(void)
1997794fba23SDavid Woodhouse {
1998794fba23SDavid Woodhouse     struct xenevtchn_handle *xc = g_new0(struct xenevtchn_handle, 1);
1999794fba23SDavid Woodhouse 
2000794fba23SDavid Woodhouse     xc->fd = eventfd(0, EFD_CLOEXEC);
2001794fba23SDavid Woodhouse     if (xc->fd < 0) {
2002794fba23SDavid Woodhouse         free(xc);
2003794fba23SDavid Woodhouse         return NULL;
2004794fba23SDavid Woodhouse     }
2005794fba23SDavid Woodhouse 
2006794fba23SDavid Woodhouse     return xc;
2007794fba23SDavid Woodhouse }
2008794fba23SDavid Woodhouse 
2009794fba23SDavid Woodhouse static int find_be_port(XenEvtchnState *s, struct xenevtchn_handle *xc)
2010794fba23SDavid Woodhouse {
2011794fba23SDavid Woodhouse     int i;
2012794fba23SDavid Woodhouse 
2013794fba23SDavid Woodhouse     for (i = 1; i < EVTCHN_2L_NR_CHANNELS; i++) {
2014794fba23SDavid Woodhouse         if (!s->be_handles[i]) {
2015794fba23SDavid Woodhouse             s->be_handles[i] = xc;
2016794fba23SDavid Woodhouse             xc->be_port = i;
2017794fba23SDavid Woodhouse             return i;
2018794fba23SDavid Woodhouse         }
2019794fba23SDavid Woodhouse     }
2020794fba23SDavid Woodhouse     return 0;
2021794fba23SDavid Woodhouse }
2022794fba23SDavid Woodhouse 
2023794fba23SDavid Woodhouse int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
2024794fba23SDavid Woodhouse                                    evtchn_port_t guest_port)
2025794fba23SDavid Woodhouse {
2026794fba23SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
2027794fba23SDavid Woodhouse     XenEvtchnPort *gp;
2028794fba23SDavid Woodhouse     uint16_t be_port = 0;
2029794fba23SDavid Woodhouse     int ret;
2030794fba23SDavid Woodhouse 
2031794fba23SDavid Woodhouse     if (!s) {
2032794fba23SDavid Woodhouse         return -ENOTSUP;
2033794fba23SDavid Woodhouse     }
2034794fba23SDavid Woodhouse 
2035794fba23SDavid Woodhouse     if (!xc) {
2036794fba23SDavid Woodhouse         return -EFAULT;
2037794fba23SDavid Woodhouse     }
2038794fba23SDavid Woodhouse 
2039794fba23SDavid Woodhouse     if (domid != xen_domid) {
2040794fba23SDavid Woodhouse         return -ESRCH;
2041794fba23SDavid Woodhouse     }
2042794fba23SDavid Woodhouse 
2043794fba23SDavid Woodhouse     if (!valid_port(guest_port)) {
2044794fba23SDavid Woodhouse         return -EINVAL;
2045794fba23SDavid Woodhouse     }
2046794fba23SDavid Woodhouse 
2047794fba23SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
2048794fba23SDavid Woodhouse 
2049794fba23SDavid Woodhouse     /* The guest has to have an unbound port waiting for us to bind */
2050794fba23SDavid Woodhouse     gp = &s->port_table[guest_port];
2051794fba23SDavid Woodhouse 
2052794fba23SDavid Woodhouse     switch (gp->type) {
2053794fba23SDavid Woodhouse     case EVTCHNSTAT_interdomain:
2054794fba23SDavid Woodhouse         /* Allow rebinding after migration, preserve port # if possible */
2055be155098SDavid Woodhouse         be_port = gp->u.interdomain.port;
2056794fba23SDavid Woodhouse         assert(be_port != 0);
2057794fba23SDavid Woodhouse         if (!s->be_handles[be_port]) {
2058794fba23SDavid Woodhouse             s->be_handles[be_port] = xc;
2059794fba23SDavid Woodhouse             xc->guest_port = guest_port;
2060794fba23SDavid Woodhouse             ret = xc->be_port = be_port;
2061794fba23SDavid Woodhouse             if (kvm_xen_has_cap(EVTCHN_SEND)) {
2062794fba23SDavid Woodhouse                 assign_kernel_eventfd(gp->type, guest_port, xc->fd);
2063794fba23SDavid Woodhouse             }
2064794fba23SDavid Woodhouse             break;
2065794fba23SDavid Woodhouse         }
2066794fba23SDavid Woodhouse         /* fall through */
2067794fba23SDavid Woodhouse 
2068794fba23SDavid Woodhouse     case EVTCHNSTAT_unbound:
2069794fba23SDavid Woodhouse         be_port = find_be_port(s, xc);
2070794fba23SDavid Woodhouse         if (!be_port) {
2071794fba23SDavid Woodhouse             ret = -ENOSPC;
2072794fba23SDavid Woodhouse             goto out;
2073794fba23SDavid Woodhouse         }
2074794fba23SDavid Woodhouse 
2075794fba23SDavid Woodhouse         gp->type = EVTCHNSTAT_interdomain;
2076be155098SDavid Woodhouse         gp->u.interdomain.to_qemu = 1;
2077be155098SDavid Woodhouse         gp->u.interdomain.port = be_port;
2078794fba23SDavid Woodhouse         xc->guest_port = guest_port;
2079794fba23SDavid Woodhouse         if (kvm_xen_has_cap(EVTCHN_SEND)) {
2080794fba23SDavid Woodhouse             assign_kernel_eventfd(gp->type, guest_port, xc->fd);
2081794fba23SDavid Woodhouse         }
2082794fba23SDavid Woodhouse         ret = be_port;
2083794fba23SDavid Woodhouse         break;
2084794fba23SDavid Woodhouse 
2085794fba23SDavid Woodhouse     default:
2086794fba23SDavid Woodhouse         ret = -EINVAL;
2087794fba23SDavid Woodhouse         break;
2088794fba23SDavid Woodhouse     }
2089794fba23SDavid Woodhouse 
2090794fba23SDavid Woodhouse  out:
2091794fba23SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
2092794fba23SDavid Woodhouse 
2093794fba23SDavid Woodhouse     return ret;
2094794fba23SDavid Woodhouse }
2095794fba23SDavid Woodhouse 
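/*
 * Illustrative sketch, not part of this file: how a backend inside
 * QEMU might bind to a port the guest created as EVTCHNSTAT_unbound
 * (or re-bind after migration, in which case the previous local port
 * number is preserved).  xen_domid is the emulated domain id declared
 * in "hw/xen/xen.h"; example_bind() itself is made up.
 */
static struct xenevtchn_handle *example_bind(evtchn_port_t guest_port)
{
    struct xenevtchn_handle *xc = xen_be_evtchn_open();
    int be_port;

    if (!xc) {
        return NULL;
    }

    be_port = xen_be_evtchn_bind_interdomain(xc, xen_domid, guest_port);
    if (be_port < 0) {
        xen_be_evtchn_close(xc);
        return NULL;
    }

    /* be_port is the backend-local port; events arrive on the eventfd. */
    return xc;
}
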
2096794fba23SDavid Woodhouse int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port)
2097794fba23SDavid Woodhouse {
2098794fba23SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
2099794fba23SDavid Woodhouse     int ret;
2100794fba23SDavid Woodhouse 
2101794fba23SDavid Woodhouse     if (!s) {
2102794fba23SDavid Woodhouse         return -ENOTSUP;
2103794fba23SDavid Woodhouse     }
2104794fba23SDavid Woodhouse 
2105794fba23SDavid Woodhouse     if (!xc) {
2106794fba23SDavid Woodhouse         return -EFAULT;
2107794fba23SDavid Woodhouse     }
2108794fba23SDavid Woodhouse 
2109794fba23SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
2110794fba23SDavid Woodhouse 
2111794fba23SDavid Woodhouse     if (port && port != xc->be_port) {
2112794fba23SDavid Woodhouse         ret = -EINVAL;
2113794fba23SDavid Woodhouse         goto out;
2114794fba23SDavid Woodhouse     }
2115794fba23SDavid Woodhouse 
2116794fba23SDavid Woodhouse     if (xc->guest_port) {
2117794fba23SDavid Woodhouse         XenEvtchnPort *gp = &s->port_table[xc->guest_port];
2118794fba23SDavid Woodhouse 
2119794fba23SDavid Woodhouse         /* This should never *not* be true */
2120794fba23SDavid Woodhouse         if (gp->type == EVTCHNSTAT_interdomain) {
2121794fba23SDavid Woodhouse             gp->type = EVTCHNSTAT_unbound;
2122be155098SDavid Woodhouse             gp->u.interdomain.port = 0;
2123794fba23SDavid Woodhouse         }
2124794fba23SDavid Woodhouse 
2125794fba23SDavid Woodhouse         if (kvm_xen_has_cap(EVTCHN_SEND)) {
2126794fba23SDavid Woodhouse             deassign_kernel_port(xc->guest_port);
2127794fba23SDavid Woodhouse         }
2128794fba23SDavid Woodhouse         xc->guest_port = 0;
2129794fba23SDavid Woodhouse     }
2130794fba23SDavid Woodhouse 
2131794fba23SDavid Woodhouse     s->be_handles[xc->be_port] = NULL;
2132794fba23SDavid Woodhouse     xc->be_port = 0;
2133794fba23SDavid Woodhouse     ret = 0;
2134794fba23SDavid Woodhouse  out:
2135794fba23SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
2136794fba23SDavid Woodhouse     return ret;
2137794fba23SDavid Woodhouse }
2138794fba23SDavid Woodhouse 
2139794fba23SDavid Woodhouse int xen_be_evtchn_close(struct xenevtchn_handle *xc)
2140794fba23SDavid Woodhouse {
2141794fba23SDavid Woodhouse     if (!xc) {
2142794fba23SDavid Woodhouse         return -EFAULT;
2143794fba23SDavid Woodhouse     }
2144794fba23SDavid Woodhouse 
2145794fba23SDavid Woodhouse     xen_be_evtchn_unbind(xc, 0);
2146794fba23SDavid Woodhouse 
2147794fba23SDavid Woodhouse     close(xc->fd);
2148794fba23SDavid Woodhouse     g_free(xc);
2149794fba23SDavid Woodhouse     return 0;
2150794fba23SDavid Woodhouse }
2151794fba23SDavid Woodhouse 
2152794fba23SDavid Woodhouse int xen_be_evtchn_fd(struct xenevtchn_handle *xc)
2153794fba23SDavid Woodhouse {
2154794fba23SDavid Woodhouse     if (!xc) {
2155794fba23SDavid Woodhouse         return -1;
2156794fba23SDavid Woodhouse     }
2157794fba23SDavid Woodhouse     return xc->fd;
2158794fba23SDavid Woodhouse }
2159794fba23SDavid Woodhouse 
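/*
 * Illustrative sketch, not part of this file: callers are expected to
 * watch the eventfd themselves, e.g. via QEMU's main-loop helper
 * qemu_set_fd_handler() (declared in "qemu/main-loop.h", already
 * included above).  example_fd_read() is a made-up handler name; a
 * sketch of its body follows the pending/unmask helpers below.
 */
static void example_fd_read(void *opaque);

static void example_watch_fd(struct xenevtchn_handle *xc)
{
    qemu_set_fd_handler(xen_be_evtchn_fd(xc), example_fd_read, NULL, xc);
}
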
2160794fba23SDavid Woodhouse int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port)
2161794fba23SDavid Woodhouse {
2162794fba23SDavid Woodhouse     XenEvtchnState *s = xen_evtchn_singleton;
2163794fba23SDavid Woodhouse     int ret;
2164794fba23SDavid Woodhouse 
2165794fba23SDavid Woodhouse     if (!s) {
2166794fba23SDavid Woodhouse         return -ENOTSUP;
2167794fba23SDavid Woodhouse     }
2168794fba23SDavid Woodhouse 
2169794fba23SDavid Woodhouse     if (!xc) {
2170794fba23SDavid Woodhouse         return -EFAULT;
2171794fba23SDavid Woodhouse     }
2172794fba23SDavid Woodhouse 
2173794fba23SDavid Woodhouse     qemu_mutex_lock(&s->port_lock);
2174794fba23SDavid Woodhouse 
2175794fba23SDavid Woodhouse     if (xc->guest_port) {
2176794fba23SDavid Woodhouse         set_port_pending(s, xc->guest_port);
2177794fba23SDavid Woodhouse         ret = 0;
2178794fba23SDavid Woodhouse     } else {
2179794fba23SDavid Woodhouse         ret = -ENOTCONN;
2180794fba23SDavid Woodhouse     }
2181794fba23SDavid Woodhouse 
2182794fba23SDavid Woodhouse     qemu_mutex_unlock(&s->port_lock);
2183794fba23SDavid Woodhouse 
2184794fba23SDavid Woodhouse     return ret;
2185794fba23SDavid Woodhouse }
2186794fba23SDavid Woodhouse 
2187794fba23SDavid Woodhouse int xen_be_evtchn_pending(struct xenevtchn_handle *xc)
2188794fba23SDavid Woodhouse {
2189794fba23SDavid Woodhouse     uint64_t val;
2190794fba23SDavid Woodhouse 
2191794fba23SDavid Woodhouse     if (!xc) {
2192794fba23SDavid Woodhouse         return -EFAULT;
2193794fba23SDavid Woodhouse     }
2194794fba23SDavid Woodhouse 
2195794fba23SDavid Woodhouse     if (!xc->be_port) {
2196794fba23SDavid Woodhouse         return 0;
2197794fba23SDavid Woodhouse     }
2198794fba23SDavid Woodhouse 
2199794fba23SDavid Woodhouse     if (eventfd_read(xc->fd, &val)) {
2200794fba23SDavid Woodhouse         return -errno;
2201794fba23SDavid Woodhouse     }
2202794fba23SDavid Woodhouse 
2203794fba23SDavid Woodhouse     return val ? xc->be_port : 0;
2204794fba23SDavid Woodhouse }
2205794fba23SDavid Woodhouse 
2206794fba23SDavid Woodhouse int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port)
2207794fba23SDavid Woodhouse {
2208794fba23SDavid Woodhouse     if (!xc) {
2209794fba23SDavid Woodhouse         return -EFAULT;
2210794fba23SDavid Woodhouse     }
2211794fba23SDavid Woodhouse 
2212794fba23SDavid Woodhouse     if (xc->be_port != port) {
2213794fba23SDavid Woodhouse         return -EINVAL;
2214794fba23SDavid Woodhouse     }
2215794fba23SDavid Woodhouse 
2216794fba23SDavid Woodhouse     /*
2217794fba23SDavid Woodhouse      * We don't actually do anything to unmask it; the event was already
2218794fba23SDavid Woodhouse      * consumed in xen_be_evtchn_pending().
2219794fba23SDavid Woodhouse      */
2220794fba23SDavid Woodhouse     return 0;
2221794fba23SDavid Woodhouse }
2222794fba23SDavid Woodhouse 
2223794fba23SDavid Woodhouse int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc)
2224794fba23SDavid Woodhouse {
2225794fba23SDavid Woodhouse     return xc->guest_port;
2226794fba23SDavid Woodhouse }
2227794fba23SDavid Woodhouse 
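/*
 * Illustrative sketch, not part of this file: the body of the made-up
 * example_fd_read() handler registered in the earlier sketch.
 * xen_be_evtchn_pending() consumes the eventfd and returns the local
 * port, xen_be_evtchn_unmask() mirrors the libxenevtchn calling
 * convention (it is a no-op here, as the comment above explains), and
 * xen_be_evtchn_notify() raises the event back towards the guest.
 * Teardown is simply xen_be_evtchn_close(), which unbinds first.
 */
static void example_fd_read(void *opaque)
{
    struct xenevtchn_handle *xc = opaque;
    int port = xen_be_evtchn_pending(xc);

    if (port <= 0) {
        return;
    }
    xen_be_evtchn_unmask(xc, port);

    /* ... process the backend's request ring here ... */

    /* Kick the guest side of the channel once a response is ready. */
    xen_be_evtchn_notify(xc, port);
}
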
2228507cb64dSJoao Martins EvtchnInfoList *qmp_xen_event_list(Error **errp)
2229507cb64dSJoao Martins {
2230507cb64dSJoao Martins     XenEvtchnState *s = xen_evtchn_singleton;
2231507cb64dSJoao Martins     EvtchnInfoList *head = NULL, **tail = &head;
2232507cb64dSJoao Martins     void *shinfo, *pending, *mask;
2233507cb64dSJoao Martins     int i;
2234507cb64dSJoao Martins 
2235507cb64dSJoao Martins     if (!s) {
2236507cb64dSJoao Martins         error_setg(errp, "Xen event channel emulation not enabled");
2237507cb64dSJoao Martins         return NULL;
2238507cb64dSJoao Martins     }
2239507cb64dSJoao Martins 
2240507cb64dSJoao Martins     shinfo = xen_overlay_get_shinfo_ptr();
2241507cb64dSJoao Martins     if (!shinfo) {
2242507cb64dSJoao Martins         error_setg(errp, "Xen shared info page not allocated");
2243507cb64dSJoao Martins         return NULL;
2244507cb64dSJoao Martins     }
2245507cb64dSJoao Martins 
2246507cb64dSJoao Martins     if (xen_is_long_mode()) {
2247507cb64dSJoao Martins         pending = shinfo + offsetof(struct shared_info, evtchn_pending);
2248507cb64dSJoao Martins         mask = shinfo + offsetof(struct shared_info, evtchn_mask);
2249507cb64dSJoao Martins     } else {
2250507cb64dSJoao Martins         pending = shinfo + offsetof(struct compat_shared_info, evtchn_pending);
2251507cb64dSJoao Martins         mask = shinfo + offsetof(struct compat_shared_info, evtchn_mask);
2252507cb64dSJoao Martins     }
2253507cb64dSJoao Martins 
2254507cb64dSJoao Martins     QEMU_LOCK_GUARD(&s->port_lock);
2255507cb64dSJoao Martins 
2256507cb64dSJoao Martins     for (i = 0; i < s->nr_ports; i++) {
2257507cb64dSJoao Martins         XenEvtchnPort *p = &s->port_table[i];
2258507cb64dSJoao Martins         EvtchnInfo *info;
2259507cb64dSJoao Martins 
2260507cb64dSJoao Martins         if (p->type == EVTCHNSTAT_closed) {
2261507cb64dSJoao Martins             continue;
2262507cb64dSJoao Martins         }
2263507cb64dSJoao Martins 
2264507cb64dSJoao Martins         info = g_new0(EvtchnInfo, 1);
2265507cb64dSJoao Martins 
2266507cb64dSJoao Martins         info->port = i;
2267507cb64dSJoao Martins         qemu_build_assert(EVTCHN_PORT_TYPE_CLOSED == EVTCHNSTAT_closed);
2268507cb64dSJoao Martins         qemu_build_assert(EVTCHN_PORT_TYPE_UNBOUND == EVTCHNSTAT_unbound);
2269507cb64dSJoao Martins         qemu_build_assert(EVTCHN_PORT_TYPE_INTERDOMAIN == EVTCHNSTAT_interdomain);
2270507cb64dSJoao Martins         qemu_build_assert(EVTCHN_PORT_TYPE_PIRQ == EVTCHNSTAT_pirq);
2271507cb64dSJoao Martins         qemu_build_assert(EVTCHN_PORT_TYPE_VIRQ == EVTCHNSTAT_virq);
2272507cb64dSJoao Martins         qemu_build_assert(EVTCHN_PORT_TYPE_IPI == EVTCHNSTAT_ipi);
2273507cb64dSJoao Martins 
2274507cb64dSJoao Martins         info->type = p->type;
2275507cb64dSJoao Martins         if (p->type == EVTCHNSTAT_interdomain) {
2276be155098SDavid Woodhouse             info->remote_domain = g_strdup(p->u.interdomain.to_qemu ?
2277507cb64dSJoao Martins                                            "qemu" : "loopback");
2278be155098SDavid Woodhouse             info->target = p->u.interdomain.port;
2279507cb64dSJoao Martins         } else {
2280be155098SDavid Woodhouse             info->target = p->u.val; /* pirq# or virq# */
2281507cb64dSJoao Martins         }
2282507cb64dSJoao Martins         info->vcpu = p->vcpu;
2283507cb64dSJoao Martins         info->pending = test_bit(i, pending);
2284507cb64dSJoao Martins         info->masked = test_bit(i, mask);
2285507cb64dSJoao Martins 
2286507cb64dSJoao Martins         QAPI_LIST_APPEND(tail, info);
2287507cb64dSJoao Martins     }
2288507cb64dSJoao Martins 
2289507cb64dSJoao Martins     return head;
2290507cb64dSJoao Martins }
2291507cb64dSJoao Martins 
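/*
 * Illustrative QMP exchange, assuming the QAPI schema exposes this
 * handler under the name "xen-event-list" (the usual mapping for the
 * qmp_ prefix); port numbers and values are made up:
 *
 *   -> { "execute": "xen-event-list" }
 *   <- { "return": [
 *          { "port": 1, "vcpu": 0, "type": "interdomain",
 *            "remote-domain": "qemu", "target": 1,
 *            "pending": false, "masked": false } ] }
 */
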
2292507cb64dSJoao Martins void qmp_xen_event_inject(uint32_t port, Error **errp)
2293507cb64dSJoao Martins {
2294507cb64dSJoao Martins     XenEvtchnState *s = xen_evtchn_singleton;
2295507cb64dSJoao Martins 
2296507cb64dSJoao Martins     if (!s) {
2297507cb64dSJoao Martins         error_setg(errp, "Xen event channel emulation not enabled");
2298507cb64dSJoao Martins         return;
2299507cb64dSJoao Martins     }
2300507cb64dSJoao Martins 
2301507cb64dSJoao Martins     if (!valid_port(port)) {
2302507cb64dSJoao Martins         error_setg(errp, "Invalid port %u", port);
                                  return;
2303507cb64dSJoao Martins     }
2304507cb64dSJoao Martins 
2305507cb64dSJoao Martins     QEMU_LOCK_GUARD(&s->port_lock);
2306507cb64dSJoao Martins 
2307507cb64dSJoao Martins     if (set_port_pending(s, port)) {
2308507cb64dSJoao Martins         error_setg(errp, "Failed to set port %u", port);
2309507cb64dSJoao Martins         return;
2310507cb64dSJoao Martins     }
2311507cb64dSJoao Martins }
2312507cb64dSJoao Martins 
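/*
 * Illustrative QMP exchange, assuming the command is exposed as
 * "xen-event-inject":
 *
 *   -> { "execute": "xen-event-inject", "arguments": { "port": 2 } }
 *   <- { "return": {} }
 */
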
2313507cb64dSJoao Martins void hmp_xen_event_list(Monitor *mon, const QDict *qdict)
2314507cb64dSJoao Martins {
2315507cb64dSJoao Martins     EvtchnInfoList *iter, *info_list;
2316507cb64dSJoao Martins     Error *err = NULL;
2317507cb64dSJoao Martins 
2318507cb64dSJoao Martins     info_list = qmp_xen_event_list(&err);
2319507cb64dSJoao Martins     if (err) {
2320507cb64dSJoao Martins         hmp_handle_error(mon, err);
2321507cb64dSJoao Martins         return;
2322507cb64dSJoao Martins     }
2323507cb64dSJoao Martins 
2324507cb64dSJoao Martins     for (iter = info_list; iter; iter = iter->next) {
2325507cb64dSJoao Martins         EvtchnInfo *info = iter->value;
2326507cb64dSJoao Martins 
2327507cb64dSJoao Martins         monitor_printf(mon, "port %4u: vcpu: %d %s", info->port, info->vcpu,
2328507cb64dSJoao Martins                        EvtchnPortType_str(info->type));
2329507cb64dSJoao Martins         if (info->type != EVTCHN_PORT_TYPE_IPI) {
2330507cb64dSJoao Martins             monitor_printf(mon,  "(");
2331507cb64dSJoao Martins             if (info->remote_domain) {
2332507cb64dSJoao Martins                 monitor_printf(mon, "%s:", info->remote_domain);
2333507cb64dSJoao Martins             }
2334507cb64dSJoao Martins             monitor_printf(mon, "%d)", info->target);
2335507cb64dSJoao Martins         }
2336507cb64dSJoao Martins         if (info->pending) {
2337507cb64dSJoao Martins             monitor_printf(mon, " PENDING");
2338507cb64dSJoao Martins         }
2339507cb64dSJoao Martins         if (info->masked) {
2340507cb64dSJoao Martins             monitor_printf(mon, " MASKED");
2341507cb64dSJoao Martins         }
2342507cb64dSJoao Martins         monitor_printf(mon, "\n");
2343507cb64dSJoao Martins     }
2344507cb64dSJoao Martins 
2345507cb64dSJoao Martins     qapi_free_EvtchnInfoList(info_list);
2346507cb64dSJoao Martins }
2347507cb64dSJoao Martins 
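/*
 * Illustrative HMP output from the handler above (assuming the monitor
 * command is registered as "xen-event-list"); ports and states are
 * made up:
 *
 *   (qemu) xen-event-list
 *   port    1: vcpu: 0 interdomain(qemu:1)
 *   port    2: vcpu: 0 virq(0) PENDING
 *   port    3: vcpu: 1 ipi
 */
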
2348507cb64dSJoao Martins void hmp_xen_event_inject(Monitor *mon, const QDict *qdict)
2349507cb64dSJoao Martins {
2350507cb64dSJoao Martins     int port = qdict_get_int(qdict, "port");
2351507cb64dSJoao Martins     Error *err = NULL;
2352507cb64dSJoao Martins 
2353507cb64dSJoao Martins     qmp_xen_event_inject(port, &err);
2354507cb64dSJoao Martins     if (err) {
2355507cb64dSJoao Martins         hmp_handle_error(mon, err);
2356507cb64dSJoao Martins     } else {
2357507cb64dSJoao Martins         monitor_printf(mon, "Delivered port %d\n", port);
2358507cb64dSJoao Martins     }
2359507cb64dSJoao Martins }
2360507cb64dSJoao Martins 
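/*
 * Illustrative HMP usage of the handler above (assuming the monitor
 * command is registered as "xen-event-inject"):
 *
 *   (qemu) xen-event-inject 2
 *   Delivered port 2
 */
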
2361