/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

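/*
 * Userspace protocol, in brief: a process opens /dev/xen/evtchn, binds one
 * or more event channels with the IOCTL_EVTCHN_BIND_* ioctls, and then
 * read()s evtchn_port_t values from the notification ring as events fire.
 * A port stays masked until the process write()s it back, which re-enables
 * delivery (and sends the deferred EOI).
 *
 * A rough, hedged sketch of a userspace consumer follows; it assumes the
 * ioctl argument layouts from the uapi header <xen/evtchn.h> and is meant
 * only to illustrate the read/write contract, not as a reference client:
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_unbound_port bind = {
 *		.remote_domain = remote_domid,
 *	};
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *
 *	for (;;) {
 *		evtchn_port_t pending[8];
 *		ssize_t len = read(fd, pending, sizeof(pending));
 *
 *		(handle each pending[i], then unmask it again)
 *		if (len > 0)
 *			write(fd, pending, len);
 *	}
 */
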
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>

struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;
	unsigned int nr_evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
	unsigned int ring_size;
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;

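	/*
	 * Once IOCTL_EVTCHN_RESTRICT_DOMID has been used, only interdomain
	 * bindings to this domain are allowed on the file handle;
	 * UNRESTRICTED_DOMID means no restriction has been applied yet.
	 */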
	domid_t restrict_domid;
};

#define UNRESTRICTED_DOMID ((domid_t)-1)

struct user_evtchn {
	struct rb_node node;
	struct per_user_data *user;
	evtchn_port_t port;
	bool enabled;
	bool unbinding;
};

static void evtchn_free_ring(evtchn_port_t *ring)
{
	kvfree(ring);
}

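/*
 * The ring size is always a power of two (it starts at 64 entries and is
 * doubled by evtchn_resize_ring()), so masking with ring_size - 1 maps the
 * free-running producer/consumer counters onto ring slots.
 */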
static unsigned int evtchn_ring_offset(struct per_user_data *u,
				       unsigned int idx)
{
	return idx & (u->ring_size - 1);
}

static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
					unsigned int idx)
{
	return u->ring + evtchn_ring_offset(u, idx);
}

static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

	u->nr_evtchns++;

	while (*new) {
		struct user_evtchn *this;

		this = rb_entry(*new, struct user_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &((*new)->rb_left);
		else if (this->port > evtchn->port)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, &u->evtchns);

	return 0;
}

static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	u->nr_evtchns--;
	rb_erase(&evtchn->node, &u->evtchns);
	kfree(evtchn);
}

static struct user_evtchn *find_evtchn(struct per_user_data *u,
				       evtchn_port_t port)
{
	struct rb_node *node = u->evtchns.rb_node;

	while (node) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);

		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}
	return NULL;
}

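/*
 * Interrupt handler for a bound port: queue the port number in the
 * notification ring and wake any reader.  The channel is marked disabled
 * here and the (lateeoi) EOI is deliberately withheld; both are undone only
 * when userspace acknowledges the event by writing the port back via
 * evtchn_write(), so an unresponsive process cannot cause an interrupt storm.
 */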
static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;
	unsigned int prod, cons;

	/* Handler might be called when tearing down the IRQ. */
	if (evtchn->unbinding)
		return IRQ_HANDLED;

	WARN(!evtchn->enabled,
	     "Interrupt for port %u, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	prod = READ_ONCE(u->ring_prod);
	cons = READ_ONCE(u->ring_cons);

	if ((prod - cons) < u->ring_size) {
		*evtchn_ring_entry(u, prod) = evtchn->port;
		smp_wmb(); /* Ensure ring contents visible */
		WRITE_ONCE(u->ring_prod, prod + 1);
		if (cons == prod) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}

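/*
 * ring_cons and ring_prod are free-running counters; only their low bits
 * (masked by evtchn_ring_offset()) index the ring.  When the unread region
 * crosses the end of the ring, (c ^ p) & ring_size is non-zero and the data
 * has to be copied to userspace as two chunks.
 */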
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = READ_ONCE(u->ring_cons);
		p = READ_ONCE(u->ring_prod);
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
			READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod));
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & u->ring_size) != 0) {
		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
			sizeof(evtchn_port_t);
		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	smp_rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	WRITE_ONCE(u->ring_cons, c + (bytes1 + bytes2) / sizeof(evtchn_port_t));
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}

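/*
 * write() is the acknowledge/unmask path: userspace hands back the ports it
 * has finished handling.  For each known, currently disabled port this
 * re-enables it and issues the deferred EOI via xen_irq_lateeoi(), allowing
 * the next interrupt to be delivered.
 */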
static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		evtchn_port_t port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			xen_irq_lateeoi(irq_from_evtchn(port), 0);
		}
	}

	mutex_unlock(&u->bind_mutex);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}

static int evtchn_resize_ring(struct per_user_data *u)
{
	unsigned int new_size;
	evtchn_port_t *new_ring, *old_ring;

	/*
	 * Ensure the ring is large enough to capture all possible
	 * events. i.e., one free slot for each bound event.
	 */
	if (u->nr_evtchns <= u->ring_size)
		return 0;

	if (u->ring_size == 0)
		new_size = 64;
	else
		new_size = 2 * u->ring_size;

	new_ring = kvmalloc_array(new_size, sizeof(*new_ring), GFP_KERNEL);
	if (!new_ring)
		return -ENOMEM;

	old_ring = u->ring;

	/*
	 * Access to the ring contents is serialized by either the
	 * prod /or/ cons lock so take both when resizing.
	 */
	mutex_lock(&u->ring_cons_mutex);
	spin_lock_irq(&u->ring_prod_lock);

	/*
	 * Copy the old ring contents to the new ring.
	 *
	 * To take care of wrapping, a full ring, and the new index
	 * pointing into the second half, simply copy the old contents
	 * twice.
	 *
	 * +---------+    +------------------+
	 * |34567  12| -> |34567  1234567  12|
	 * +-----p-c-+    +-------c------p---+
	 */
	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
	memcpy(new_ring + u->ring_size, old_ring,
	       u->ring_size * sizeof(*u->ring));

	u->ring = new_ring;
	u->ring_size = new_size;

	spin_unlock_irq(&u->ring_prod_lock);
	mutex_unlock(&u->ring_cons_mutex);

	evtchn_free_ring(old_ring);

	return 0;
}

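/*
 * Bind an already-allocated event channel port to this file handle: track it
 * in the rb-tree, make sure the notification ring has a slot for it, and
 * install the lateeoi interrupt handler.  With is_static the port was not
 * allocated by this driver (e.g. a statically configured channel), so on
 * failure it must not be closed here.
 */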
static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
			       bool is_static)
{
	struct user_evtchn *evtchn;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */

	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_resize_ring(u);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, IRQF_SHARED,
					       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port, is_static);
	return rc;

err:
	/* bind failed, should close the port now */
	if (!is_static)
		xen_evtchn_close(port);

	del_evtchn(u, evtchn);
	return rc;
}

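/*
 * Tear down a binding.  The unbinding flag is set first so that the shared
 * interrupt handler, which may still be invoked while the IRQ is being
 * freed, bails out instead of queuing the port into the notification ring.
 */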
static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	BUG_ON(irq < 0);

	evtchn->unbinding = true;
	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}

static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = xen_vcpu_nr(0);
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port, false);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID &&
		    u->restrict_domid != bind.remote_domain)
			break;

		bind_interdomain.remote_dom = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port, false);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port, false);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= xen_evtchn_nr_channels())
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, unbind.port);
		if (!evtchn)
			break;

		disable_irq(irq_from_evtchn(unbind.port));
		evtchn_unbind_from_user(u, evtchn);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_BIND_STATIC: {
		struct ioctl_evtchn_bind bind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		rc = -EISCONN;
		evtchn = find_evtchn(u, bind.port);
		if (evtchn)
			break;

		rc = evtchn_bind_to_user(u, bind.port, true);
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, notify.port);
		if (evtchn) {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&u->ring_prod_lock);
		WRITE_ONCE(u->ring_cons, 0);
		WRITE_ONCE(u->ring_prod, 0);
		u->ring_overflow = 0;
		spin_unlock_irq(&u->ring_prod_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_RESTRICT_DOMID: {
		struct ioctl_evtchn_restrict_domid ierd;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&ierd, uarg, sizeof(ierd)))
			break;

		rc = -EINVAL;
		if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
			break;

		u->restrict_domid = ierd.domid;
		rc = 0;

		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}

static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (u->ring_overflow)
		mask = EPOLLERR;
	return mask;
}

static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;
	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);
	spin_lock_init(&u->ring_prod_lock);

	u->restrict_domid = UNRESTRICTED_DOMID;

	filp->private_data = u;

	return stream_open(inode, filp);
}

static int evtchn_release(struct inode *inode, struct file *filp)
{
	struct per_user_data *u = filp->private_data;
	struct rb_node *node;

	while ((node = u->evtchns.rb_node)) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);
		disable_irq(irq_from_evtchn(evtchn->port));
		evtchn_unbind_from_user(u, evtchn);
	}

	evtchn_free_ring(u->ring);
	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner		= THIS_MODULE,
	.read		= evtchn_read,
	.write		= evtchn_write,
	.unlocked_ioctl	= evtchn_ioctl,
	.poll		= evtchn_poll,
	.fasync		= evtchn_fasync,
	.open		= evtchn_open,
	.release	= evtchn_release,
	.llseek		= no_llseek,
};

static struct miscdevice evtchn_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "xen/evtchn",
	.fops	= &evtchn_fops,
};
static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		pr_err("Could not register /dev/xen/evtchn\n");
		return err;
	}

	pr_info("Event-channel device installed\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");