/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>

/*
 * Per-open-file state: the set of event channels this file descriptor is
 * bound to (an rb-tree keyed by port) plus the notification ring through
 * which fired ports are reported to user space via read().
 */
struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;
	unsigned int nr_evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
	unsigned int ring_size;
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;

	domid_t restrict_domid;
};

#define UNRESTRICTED_DOMID ((domid_t)-1)

/* One bound event channel; node in per_user_data::evtchns, keyed by port. */
struct user_evtchn {
	struct rb_node node;
	struct per_user_data *user;
	evtchn_port_t port;
	bool enabled;
};

/* Ring may come from kmalloc or vmalloc; kvfree() handles both. */
static void evtchn_free_ring(evtchn_port_t *ring)
{
	kvfree(ring);
}

/* ring_size is always a power of two, so masking yields the ring index. */
static unsigned int evtchn_ring_offset(struct per_user_data *u,
				       unsigned int idx)
{
	return idx & (u->ring_size - 1);
}

static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
					unsigned int idx)
{
	return u->ring + evtchn_ring_offset(u, idx);
}

/*
 * Insert an event channel into the per-user rb-tree, keyed by port.
 * Returns -EEXIST if the port is already bound to this file.
 */
static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

	u->nr_evtchns++;

	while (*new) {
		struct user_evtchn *this;

		this = rb_entry(*new, struct user_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &((*new)->rb_left);
		else if (this->port > evtchn->port)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, &u->evtchns);

	return 0;
}

/* Remove from the rb-tree and free; caller holds bind_mutex or is release. */
static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	u->nr_evtchns--;
	rb_erase(&evtchn->node, &u->evtchns);
	kfree(evtchn);
}

/* Look up a bound port in the per-user rb-tree; NULL if not bound. */
static struct user_evtchn *find_evtchn(struct per_user_data *u,
				       evtchn_port_t port)
{
	struct rb_node *node = u->evtchns.rb_node;

	while (node) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);

		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}
	return NULL;
}

/*
 * Interrupt handler: queue the fired port on the notification ring and
 * mark the channel disabled until user space re-enables it by writing
 * the port back (see evtchn_write()).  Wakes readers only on the
 * empty->non-empty transition.  Sets ring_overflow if the ring is full.
 */
static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;
	unsigned int prod, cons;

	WARN(!evtchn->enabled,
	     "Interrupt for port %u, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	prod = READ_ONCE(u->ring_prod);
	cons = READ_ONCE(u->ring_cons);

	if ((prod - cons) < u->ring_size) {
		*evtchn_ring_entry(u, prod) = evtchn->port;
		smp_wmb(); /* Ensure ring contents visible */
		WRITE_ONCE(u->ring_prod, prod + 1);
		if (cons == prod) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}

/*
 * Copy queued port numbers to user space.  Blocks (unless O_NONBLOCK)
 * while the ring is empty; returns -EFBIG once an overflow has been
 * recorded.  Data may be split into two chunks at the ring wrap point.
 */
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = READ_ONCE(u->ring_cons);
		p = READ_ONCE(u->ring_prod);
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
			READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod));
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & u->ring_size) != 0) {
		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
			sizeof(evtchn_port_t);
		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	smp_rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	WRITE_ONCE(u->ring_cons, c + (bytes1 + bytes2) / sizeof(evtchn_port_t));
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}

/*
 * User space writes back an array of port numbers to re-enable delivery
 * on channels previously reported (and disabled) by evtchn_interrupt().
 * Each valid, currently-disabled port gets a lateeoi to unmask it.
 */
static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		evtchn_port_t port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			xen_irq_lateeoi(irq_from_evtchn(port), 0);
		}
	}

	mutex_unlock(&u->bind_mutex);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}

/*
 * Grow the notification ring (64 entries initially, doubling thereafter)
 * so it always has one free slot per bound event channel, preserving any
 * queued entries across the resize.
 */
static int evtchn_resize_ring(struct per_user_data *u)
{
	unsigned int new_size;
	evtchn_port_t *new_ring, *old_ring;

	/*
	 * Ensure the ring is large enough to capture all possible
	 * events. i.e., one free slot for each bound event.
	 */
	if (u->nr_evtchns <= u->ring_size)
		return 0;

	if (u->ring_size == 0)
		new_size = 64;
	else
		new_size = 2 * u->ring_size;

	new_ring = kvmalloc_array(new_size, sizeof(*new_ring), GFP_KERNEL);
	if (!new_ring)
		return -ENOMEM;

	old_ring = u->ring;

	/*
	 * Access to the ring contents is serialized by either the
	 * prod /or/ cons lock so take both when resizing.
	 */
	mutex_lock(&u->ring_cons_mutex);
	spin_lock_irq(&u->ring_prod_lock);

	/*
	 * Copy the old ring contents to the new ring.
	 *
	 * To take care of wrapping, a full ring, and the new index
	 * pointing into the second half, simply copy the old contents
	 * twice.
	 *
	 * +---------+    +------------------+
	 * |34567  12| -> |34567  1234567  12|
	 * +-----p-c-+    +-------c------p---+
	 */
	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
	memcpy(new_ring + u->ring_size, old_ring,
	       u->ring_size * sizeof(*u->ring));

	u->ring = new_ring;
	u->ring_size = new_size;

	spin_unlock_irq(&u->ring_prod_lock);
	mutex_unlock(&u->ring_cons_mutex);

	evtchn_free_ring(old_ring);

	return 0;
}

/*
 * Track @port in the per-user tree, ensure ring capacity, and install the
 * lateeoi interrupt handler.  On failure the port is closed unless it is
 * a static (pre-existing) binding.  Caller holds bind_mutex.
 */
static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
			       bool is_static)
{
	struct user_evtchn *evtchn;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */

	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_resize_ring(u);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, IRQF_SHARED,
					       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port, is_static);
	return rc;

err:
	/* bind failed, should close the port now */
	if (!is_static)
		xen_evtchn_close(port);

	del_evtchn(u, evtchn);
	return rc;
}

/* Tear down the irq handler for one bound channel and drop its node. */
static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	BUG_ON(irq < 0);

	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}

/*
 * ioctl dispatcher: bind (virq/interdomain/unbound/static), unbind,
 * notify, ring reset, and domid restriction.  bind_mutex is held for
 * the duration of every command.
 */
static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = xen_vcpu_nr(0);
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port, false);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID &&
		    u->restrict_domid != bind.remote_domain)
			break;

		bind_interdomain.remote_dom  = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port, false);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port, false);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= xen_evtchn_nr_channels())
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, unbind.port);
		if (!evtchn)
			break;

		disable_irq(irq_from_evtchn(unbind.port));
		evtchn_unbind_from_user(u, evtchn);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_BIND_STATIC: {
		struct ioctl_evtchn_bind bind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		rc = -EISCONN;
		evtchn = find_evtchn(u, bind.port);
		if (evtchn)
			break;

		rc = evtchn_bind_to_user(u, bind.port, true);
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, notify.port);
		if (evtchn) {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&u->ring_prod_lock);
		WRITE_ONCE(u->ring_cons, 0);
		WRITE_ONCE(u->ring_prod, 0);
		u->ring_overflow = 0;
		spin_unlock_irq(&u->ring_prod_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_RESTRICT_DOMID: {
		struct ioctl_evtchn_restrict_domid ierd;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&ierd, uarg, sizeof(ierd)))
			break;

		rc = -EINVAL;
		if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
			break;

		u->restrict_domid = ierd.domid;
		rc = 0;

		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}

/* poll(): readable when the ring is non-empty; EPOLLERR after overflow. */
static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (u->ring_overflow)
		mask = EPOLLERR;
	return mask;
}

static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;
	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

/* Allocate and initialise per-open state; ring is grown lazily on bind. */
static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);
	spin_lock_init(&u->ring_prod_lock);

	u->restrict_domid = UNRESTRICTED_DOMID;

	filp->private_data = u;

	return stream_open(inode, filp);
}

/* Unbind every remaining channel, then free the ring and per-open state. */
static int evtchn_release(struct inode *inode, struct file *filp)
{
	struct per_user_data *u = filp->private_data;
	struct rb_node *node;

	while ((node = u->evtchns.rb_node)) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);
		disable_irq(irq_from_evtchn(evtchn->port));
		evtchn_unbind_from_user(u, evtchn);
	}

	evtchn_free_ring(u->ring);
	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner   = THIS_MODULE,
	.read    = evtchn_read,
	.write   = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll    = evtchn_poll,
	.fasync  = evtchn_fasync,
	.open    = evtchn_open,
	.release = evtchn_release,
	.llseek	 = no_llseek,
};

static struct miscdevice evtchn_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/evtchn",
	.fops         = &evtchn_fops,
};
static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		pr_err("Could not register /dev/xen/evtchn\n");
		return err;
	}

	pr_info("Event-channel device installed\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");