/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>

struct per_user_data {
        struct mutex bind_mutex; /* serialize bind/unbind operations */
        struct rb_root evtchns;

        /* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
        evtchn_port_t *ring;
        unsigned int ring_cons, ring_prod, ring_overflow;
        struct mutex ring_cons_mutex; /* protect against concurrent readers */
        spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

        /* Processes wait on this queue when ring is empty. */
        wait_queue_head_t evtchn_wait;
        struct fasync_struct *evtchn_async_queue;
        const char *name;
};

struct user_evtchn {
        struct rb_node node;
        struct per_user_data *user;
        unsigned port;
        bool enabled;
};

static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
        struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

        while (*new) {
                struct user_evtchn *this;

                this = container_of(*new, struct user_evtchn, node);

                parent = *new;
                if (this->port < evtchn->port)
                        new = &((*new)->rb_left);
                else if (this->port > evtchn->port)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&evtchn->node, parent, new);
        rb_insert_color(&evtchn->node, &u->evtchns);

        return 0;
}

static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
        rb_erase(&evtchn->node, &u->evtchns);
        kfree(evtchn);
}

static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
{
        struct rb_node *node = u->evtchns.rb_node;

        while (node) {
                struct user_evtchn *evtchn;

                evtchn = container_of(node, struct user_evtchn, node);

                if (evtchn->port < port)
                        node = node->rb_left;
                else if (evtchn->port > port)
                        node = node->rb_right;
                else
                        return evtchn;
        }
        return NULL;
}

static irqreturn_t evtchn_interrupt(int irq, void *data)
{
        struct user_evtchn *evtchn = data;
        struct per_user_data *u = evtchn->user;

        WARN(!evtchn->enabled,
             "Interrupt for port %d, but apparently not enabled; per-user %p\n",
             evtchn->port, u);

        disable_irq_nosync(irq);
        evtchn->enabled = false;

        spin_lock(&u->ring_prod_lock);

        if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
                u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
                wmb(); /* Ensure ring contents visible */
                if (u->ring_cons == u->ring_prod++) {
                        wake_up_interruptible(&u->evtchn_wait);
                        kill_fasync(&u->evtchn_async_queue,
                                    SIGIO, POLL_IN);
                }
        } else
                u->ring_overflow = 1;

        spin_unlock(&u->ring_prod_lock);

        return IRQ_HANDLED;
}

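/*
 * read() hands pending port numbers to userspace as an array of
 * evtchn_port_t.  The copy may be split into two chunks when the snapshot
 * of the ring wraps past the end of the page; the rmb() below pairs with
 * the wmb() in evtchn_interrupt() so the ports stored by the interrupt
 * handler are visible before they are copied out.
 *
 * A consumer would typically drain the ring along these lines (illustrative
 * sketch only, not part of this driver; "fd" and process() are
 * placeholders):
 *
 *	evtchn_port_t ports[8];
 *	ssize_t len = read(fd, ports, sizeof(ports));
 *
 *	for (i = 0; i < len / sizeof(ports[0]); i++)
 *		process(ports[i]);
 *	write(fd, ports, len);	unmasks the ports just handled
 */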
static ssize_t evtchn_read(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        int rc;
        unsigned int c, p, bytes1 = 0, bytes2 = 0;
        struct per_user_data *u = file->private_data;

        /* Whole number of ports. */
        count &= ~(sizeof(evtchn_port_t)-1);

        if (count == 0)
                return 0;

        if (count > PAGE_SIZE)
                count = PAGE_SIZE;

        for (;;) {
                mutex_lock(&u->ring_cons_mutex);

                rc = -EFBIG;
                if (u->ring_overflow)
                        goto unlock_out;

                c = u->ring_cons;
                p = u->ring_prod;
                if (c != p)
                        break;

                mutex_unlock(&u->ring_cons_mutex);

                if (file->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                rc = wait_event_interruptible(u->evtchn_wait,
                                              u->ring_cons != u->ring_prod);
                if (rc)
                        return rc;
        }

        /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
        if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
                bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
                        sizeof(evtchn_port_t);
                bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
        } else {
                bytes1 = (p - c) * sizeof(evtchn_port_t);
                bytes2 = 0;
        }

        /* Truncate chunks according to caller's maximum byte count. */
        if (bytes1 > count) {
                bytes1 = count;
                bytes2 = 0;
        } else if ((bytes1 + bytes2) > count) {
                bytes2 = count - bytes1;
        }

        rc = -EFAULT;
        rmb(); /* Ensure that we see the port before we copy it. */
        if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
            ((bytes2 != 0) &&
             copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
                goto unlock_out;

        u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
        rc = bytes1 + bytes2;

 unlock_out:
        mutex_unlock(&u->ring_cons_mutex);
        return rc;
}

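/*
 * write() accepts the same whole-port format as read().  Each port listed
 * is looked up in the caller's binding tree and, if it was disabled by
 * evtchn_interrupt(), its interrupt is re-enabled so further events can be
 * delivered.  Unknown or already-enabled ports are silently skipped.
 */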
static ssize_t evtchn_write(struct file *file, const char __user *buf,
                            size_t count, loff_t *ppos)
{
        int rc, i;
        evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
        struct per_user_data *u = file->private_data;

        if (kbuf == NULL)
                return -ENOMEM;

        /* Whole number of ports. */
        count &= ~(sizeof(evtchn_port_t)-1);

        rc = 0;
        if (count == 0)
                goto out;

        if (count > PAGE_SIZE)
                count = PAGE_SIZE;

        rc = -EFAULT;
        if (copy_from_user(kbuf, buf, count) != 0)
                goto out;

        mutex_lock(&u->bind_mutex);

        for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
                unsigned port = kbuf[i];
                struct user_evtchn *evtchn;

                evtchn = find_evtchn(u, port);
                if (evtchn && !evtchn->enabled) {
                        evtchn->enabled = true;
                        enable_irq(irq_from_evtchn(port));
                }
        }

        mutex_unlock(&u->bind_mutex);

        rc = count;

 out:
        free_page((unsigned long)kbuf);
        return rc;
}

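/*
 * Record a freshly bound port against this file handle: allocate a
 * user_evtchn, insert it into the per-user rbtree and register
 * evtchn_interrupt() as the IRQ handler.  If anything fails, the port is
 * closed again with EVTCHNOP_close so the hypervisor side is not leaked.
 */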
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
        struct user_evtchn *evtchn;
        struct evtchn_close close;
        int rc = 0;

        /*
         * Ports are never reused, so every caller should pass in a
         * unique port.
         *
         * (Locking not necessary because we haven't registered the
         * interrupt handler yet, and our caller has already
         * serialized bind operations.)
         */

        evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
        if (!evtchn)
                return -ENOMEM;

        evtchn->user = u;
        evtchn->port = port;
        evtchn->enabled = true; /* start enabled */

        rc = add_evtchn(u, evtchn);
        if (rc < 0)
                goto err;

        rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
                                       u->name, evtchn);
        if (rc < 0)
                goto err;

        rc = evtchn_make_refcounted(port);
        return rc;

err:
        /* bind failed, should close the port now */
        close.port = port;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();
        del_evtchn(u, evtchn);  /* also frees evtchn */
        return rc;
}

static void evtchn_unbind_from_user(struct per_user_data *u,
                                    struct user_evtchn *evtchn)
{
        int irq = irq_from_evtchn(evtchn->port);

        BUG_ON(irq < 0);

        unbind_from_irqhandler(irq, evtchn);

        del_evtchn(u, evtchn);
}

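/*
 * ioctl() is how userspace creates and destroys bindings; read()/write()
 * only transport and unmask ports that are already bound.  Every command
 * below runs with bind_mutex held, which also serializes it against the
 * unmask loop in evtchn_write().
 *
 * A rough sketch of typical userspace usage (illustrative only; "fd" and
 * remote_domid are placeholders, the ioctl structures are those used
 * below):
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_unbound_port bind = {
 *		.remote_domain = remote_domid,
 *	};
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *
 *	evtchn_port_t pending;
 *	read(fd, &pending, sizeof(pending));	blocks until the port fires
 *	write(fd, &pending, sizeof(pending));	re-enables delivery
 */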
static long evtchn_ioctl(struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        int rc;
        struct per_user_data *u = file->private_data;
        void __user *uarg = (void __user *) arg;

        /* Prevent bind from racing with unbind */
        mutex_lock(&u->bind_mutex);

        switch (cmd) {
        case IOCTL_EVTCHN_BIND_VIRQ: {
                struct ioctl_evtchn_bind_virq bind;
                struct evtchn_bind_virq bind_virq;

                rc = -EFAULT;
                if (copy_from_user(&bind, uarg, sizeof(bind)))
                        break;

                bind_virq.virq = bind.virq;
                bind_virq.vcpu = 0;
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                 &bind_virq);
                if (rc != 0)
                        break;

                rc = evtchn_bind_to_user(u, bind_virq.port);
                if (rc == 0)
                        rc = bind_virq.port;
                break;
        }

        case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
                struct ioctl_evtchn_bind_interdomain bind;
                struct evtchn_bind_interdomain bind_interdomain;

                rc = -EFAULT;
                if (copy_from_user(&bind, uarg, sizeof(bind)))
                        break;

                bind_interdomain.remote_dom  = bind.remote_domain;
                bind_interdomain.remote_port = bind.remote_port;
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                                 &bind_interdomain);
                if (rc != 0)
                        break;

                rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
                if (rc == 0)
                        rc = bind_interdomain.local_port;
                break;
        }

        case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
                struct ioctl_evtchn_bind_unbound_port bind;
                struct evtchn_alloc_unbound alloc_unbound;

                rc = -EFAULT;
                if (copy_from_user(&bind, uarg, sizeof(bind)))
                        break;

                alloc_unbound.dom        = DOMID_SELF;
                alloc_unbound.remote_dom = bind.remote_domain;
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                                 &alloc_unbound);
                if (rc != 0)
                        break;

                rc = evtchn_bind_to_user(u, alloc_unbound.port);
                if (rc == 0)
                        rc = alloc_unbound.port;
                break;
        }

        case IOCTL_EVTCHN_UNBIND: {
                struct ioctl_evtchn_unbind unbind;
                struct user_evtchn *evtchn;

                rc = -EFAULT;
                if (copy_from_user(&unbind, uarg, sizeof(unbind)))
                        break;

                rc = -EINVAL;
                if (unbind.port >= NR_EVENT_CHANNELS)
                        break;

                rc = -ENOTCONN;
                evtchn = find_evtchn(u, unbind.port);
                if (!evtchn)
                        break;

                disable_irq(irq_from_evtchn(unbind.port));
                evtchn_unbind_from_user(u, evtchn);
                rc = 0;
                break;
        }

        case IOCTL_EVTCHN_NOTIFY: {
                struct ioctl_evtchn_notify notify;
                struct user_evtchn *evtchn;

                rc = -EFAULT;
                if (copy_from_user(&notify, uarg, sizeof(notify)))
                        break;

                rc = -ENOTCONN;
                evtchn = find_evtchn(u, notify.port);
                if (evtchn) {
                        notify_remote_via_evtchn(notify.port);
                        rc = 0;
                }
                break;
        }

        case IOCTL_EVTCHN_RESET: {
                /* Initialise the ring to empty. Clear errors. */
                mutex_lock(&u->ring_cons_mutex);
                spin_lock_irq(&u->ring_prod_lock);
                u->ring_cons = u->ring_prod = u->ring_overflow = 0;
                spin_unlock_irq(&u->ring_prod_lock);
                mutex_unlock(&u->ring_cons_mutex);
                rc = 0;
                break;
        }

        default:
                rc = -ENOSYS;
                break;
        }
        mutex_unlock(&u->bind_mutex);

        return rc;
}

static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
        unsigned int mask = POLLOUT | POLLWRNORM;
        struct per_user_data *u = file->private_data;

        poll_wait(file, &u->evtchn_wait, wait);
        if (u->ring_cons != u->ring_prod)
                mask |= POLLIN | POLLRDNORM;
        if (u->ring_overflow)
                mask = POLLERR;
        return mask;
}

static int evtchn_fasync(int fd, struct file *filp, int on)
{
        struct per_user_data *u = filp->private_data;
        return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

static int evtchn_open(struct inode *inode, struct file *filp)
{
        struct per_user_data *u;

        u = kzalloc(sizeof(*u), GFP_KERNEL);
        if (u == NULL)
                return -ENOMEM;

        u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
        if (u->name == NULL) {
                kfree(u);
                return -ENOMEM;
        }

        init_waitqueue_head(&u->evtchn_wait);

        u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
        if (u->ring == NULL) {
                kfree(u->name);
                kfree(u);
                return -ENOMEM;
        }

        mutex_init(&u->bind_mutex);
        mutex_init(&u->ring_cons_mutex);
        spin_lock_init(&u->ring_prod_lock);

        filp->private_data = u;

        return nonseekable_open(inode, filp);
}

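/*
 * release() tears down whatever the process left bound: the rbtree is
 * drained from the root until empty, disabling and unbinding each remaining
 * port, before the notification ring and per-user state are freed.
 */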
static int evtchn_release(struct inode *inode, struct file *filp)
{
        struct per_user_data *u = filp->private_data;
        struct rb_node *node;

        while ((node = u->evtchns.rb_node)) {
                struct user_evtchn *evtchn;

                evtchn = rb_entry(node, struct user_evtchn, node);
                disable_irq(irq_from_evtchn(evtchn->port));
                evtchn_unbind_from_user(u, evtchn);
        }

        free_page((unsigned long)u->ring);
        kfree(u->name);
        kfree(u);

        return 0;
}

static const struct file_operations evtchn_fops = {
        .owner   = THIS_MODULE,
        .read    = evtchn_read,
        .write   = evtchn_write,
        .unlocked_ioctl = evtchn_ioctl,
        .poll    = evtchn_poll,
        .fasync  = evtchn_fasync,
        .open    = evtchn_open,
        .release = evtchn_release,
        .llseek  = no_llseek,
};

static struct miscdevice evtchn_miscdev = {
        .minor        = MISC_DYNAMIC_MINOR,
        .name         = "xen/evtchn",
        .fops         = &evtchn_fops,
};

static int __init evtchn_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        /* Create '/dev/xen/evtchn'. */
        err = misc_register(&evtchn_miscdev);
        if (err != 0) {
                pr_err("Could not register /dev/xen/evtchn\n");
                return err;
        }

        pr_info("Event-channel device installed\n");

        return 0;
}

static void __exit evtchn_cleanup(void)
{
        misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");