xref: /openbmc/linux/drivers/xen/evtchn.c (revision c44b849cee8c3ac587da3b0980e01f77500d158c)
1f7116284SIan Campbell /******************************************************************************
2f7116284SIan Campbell  * evtchn.c
3f7116284SIan Campbell  *
4f7116284SIan Campbell  * Driver for receiving and demuxing event-channel signals.
5f7116284SIan Campbell  *
6f7116284SIan Campbell  * Copyright (c) 2004-2005, K A Fraser
7f7116284SIan Campbell  * Multi-process extensions Copyright (c) 2004, Steven Smith
8f7116284SIan Campbell  *
9f7116284SIan Campbell  * This program is free software; you can redistribute it and/or
10f7116284SIan Campbell  * modify it under the terms of the GNU General Public License version 2
11f7116284SIan Campbell  * as published by the Free Software Foundation; or, when distributed
12f7116284SIan Campbell  * separately from the Linux kernel or incorporated into other
13f7116284SIan Campbell  * software packages, subject to the following license:
14f7116284SIan Campbell  *
15f7116284SIan Campbell  * Permission is hereby granted, free of charge, to any person obtaining a copy
16f7116284SIan Campbell  * of this source file (the "Software"), to deal in the Software without
17f7116284SIan Campbell  * restriction, including without limitation the rights to use, copy, modify,
18f7116284SIan Campbell  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19f7116284SIan Campbell  * and to permit persons to whom the Software is furnished to do so, subject to
20f7116284SIan Campbell  * the following conditions:
21f7116284SIan Campbell  *
22f7116284SIan Campbell  * The above copyright notice and this permission notice shall be included in
23f7116284SIan Campbell  * all copies or substantial portions of the Software.
24f7116284SIan Campbell  *
25f7116284SIan Campbell  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26f7116284SIan Campbell  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27f7116284SIan Campbell  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28f7116284SIan Campbell  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29f7116284SIan Campbell  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30f7116284SIan Campbell  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31f7116284SIan Campbell  * IN THE SOFTWARE.
32f7116284SIan Campbell  */
33f7116284SIan Campbell 
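/*
 * Userspace protocol, in brief: an application opens /dev/xen/evtchn,
 * binds one or more event channels with the IOCTL_EVTCHN_BIND_* ioctls,
 * and then read()s pending port numbers as an array of evtchn_port_t.
 * Delivery on a port stays disabled until the application write()s that
 * port number back, which re-enables it (see evtchn_write() below).
 *
 * The sketch below is illustrative only and not part of this driver.  It
 * assumes the UAPI header (include/uapi/xen/evtchn.h) is visible to
 * userspace as <xen/evtchn.h> and that evtchn_port_t is 32 bits wide;
 * adjust the include path and error handling for a real application.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <xen/evtchn.h>
 *
 *	int evtchn_example(unsigned int remote_domain)
 *	{
 *		struct ioctl_evtchn_bind_unbound_port bind = {
 *			.remote_domain = remote_domain,
 *		};
 *		uint32_t port, pending;
 *		int fd, rc;
 *
 *		fd = open("/dev/xen/evtchn", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		// Allocate an unbound port that remote_domain may connect to;
 *		// on success the ioctl returns the local port number.
 *		rc = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *		if (rc < 0)
 *			goto out;
 *		port = rc;
 *		fprintf(stderr, "bound local port %u\n", port);
 *
 *		for (;;) {
 *			// Blocks until at least one bound port is pending.
 *			if (read(fd, &pending, sizeof(pending)) != sizeof(pending))
 *				break;
 *
 *			// ... handle the event signalled on 'pending' here ...
 *
 *			// Writing the port back re-enables delivery on it.
 *			if (write(fd, &pending, sizeof(pending)) != sizeof(pending))
 *				break;
 *		}
 *	out:
 *		close(fd);
 *		return rc < 0 ? -1 : 0;
 *	}
 */
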
34283c0972SJoe Perches #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35283c0972SJoe Perches 
36f7116284SIan Campbell #include <linux/module.h>
37f7116284SIan Campbell #include <linux/kernel.h>
38f7116284SIan Campbell #include <linux/sched.h>
39f7116284SIan Campbell #include <linux/slab.h>
40f7116284SIan Campbell #include <linux/string.h>
41f7116284SIan Campbell #include <linux/errno.h>
42f7116284SIan Campbell #include <linux/fs.h>
43f7116284SIan Campbell #include <linux/miscdevice.h>
44f7116284SIan Campbell #include <linux/major.h>
45f7116284SIan Campbell #include <linux/proc_fs.h>
46f7116284SIan Campbell #include <linux/stat.h>
47f7116284SIan Campbell #include <linux/poll.h>
48f7116284SIan Campbell #include <linux/irq.h>
49f7116284SIan Campbell #include <linux/init.h>
50f7116284SIan Campbell #include <linux/mutex.h>
51f7116284SIan Campbell #include <linux/cpu.h>
5286200154SDavid Vrabel #include <linux/mm.h>
5386200154SDavid Vrabel #include <linux/vmalloc.h>
541ccbf534SJeremy Fitzhardinge 
551ccbf534SJeremy Fitzhardinge #include <xen/xen.h>
56f7116284SIan Campbell #include <xen/events.h>
57f7116284SIan Campbell #include <xen/evtchn.h>
58cbbb4682SVitaly Kuznetsov #include <xen/xen-ops.h>
59f7116284SIan Campbell #include <asm/xen/hypervisor.h>
60f7116284SIan Campbell 
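/* One instance per open file description of /dev/xen/evtchn (see evtchn_open()). */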
61f7116284SIan Campbell struct per_user_data {
620a4666b5SJeremy Fitzhardinge 	struct mutex bind_mutex; /* serialize bind/unbind operations */
6373cc4bb0SDavid Vrabel 	struct rb_root evtchns;
6486200154SDavid Vrabel 	unsigned int nr_evtchns;
650a4666b5SJeremy Fitzhardinge 
66f7116284SIan Campbell 	/* Notification ring, accessed via /dev/xen/evtchn. */
6786200154SDavid Vrabel 	unsigned int ring_size;
68f7116284SIan Campbell 	evtchn_port_t *ring;
69f7116284SIan Campbell 	unsigned int ring_cons, ring_prod, ring_overflow;
70f7116284SIan Campbell 	struct mutex ring_cons_mutex; /* protect against concurrent readers */
7173cc4bb0SDavid Vrabel 	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */
72f7116284SIan Campbell 
73f7116284SIan Campbell 	/* Processes wait on this queue when ring is empty. */
74f7116284SIan Campbell 	wait_queue_head_t evtchn_wait;
75f7116284SIan Campbell 	struct fasync_struct *evtchn_async_queue;
76f7116284SIan Campbell 	const char *name;
77fbc872c3SDavid Vrabel 
78fbc872c3SDavid Vrabel 	domid_t restrict_domid;
79f7116284SIan Campbell };
80f7116284SIan Campbell 
81fbc872c3SDavid Vrabel #define UNRESTRICTED_DOMID ((domid_t)-1)
82fbc872c3SDavid Vrabel 
8373cc4bb0SDavid Vrabel struct user_evtchn {
8473cc4bb0SDavid Vrabel 	struct rb_node node;
8573cc4bb0SDavid Vrabel 	struct per_user_data *user;
860102e4efSYan Yankovskyi 	evtchn_port_t port;
8773cc4bb0SDavid Vrabel 	bool enabled;
8873cc4bb0SDavid Vrabel };
89f7116284SIan Campbell 
9086200154SDavid Vrabel static void evtchn_free_ring(evtchn_port_t *ring)
9186200154SDavid Vrabel {
9286200154SDavid Vrabel 	kvfree(ring);
9386200154SDavid Vrabel }
9486200154SDavid Vrabel 
9586200154SDavid Vrabel static unsigned int evtchn_ring_offset(struct per_user_data *u,
9686200154SDavid Vrabel 				       unsigned int idx)
9786200154SDavid Vrabel {
9886200154SDavid Vrabel 	return idx & (u->ring_size - 1);
9986200154SDavid Vrabel }
10086200154SDavid Vrabel 
10186200154SDavid Vrabel static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
10286200154SDavid Vrabel 					unsigned int idx)
10386200154SDavid Vrabel {
10486200154SDavid Vrabel 	return u->ring + evtchn_ring_offset(u, idx);
10586200154SDavid Vrabel }
10686200154SDavid Vrabel 
10773cc4bb0SDavid Vrabel static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
108e3cc067bSJeremy Fitzhardinge {
10973cc4bb0SDavid Vrabel 	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
110e3cc067bSJeremy Fitzhardinge 
11186200154SDavid Vrabel 	u->nr_evtchns++;
11286200154SDavid Vrabel 
11373cc4bb0SDavid Vrabel 	while (*new) {
11473cc4bb0SDavid Vrabel 		struct user_evtchn *this;
115e3cc067bSJeremy Fitzhardinge 
1162f60b288SGeliang Tang 		this = rb_entry(*new, struct user_evtchn, node);
117e3cc067bSJeremy Fitzhardinge 
11873cc4bb0SDavid Vrabel 		parent = *new;
11973cc4bb0SDavid Vrabel 		if (this->port < evtchn->port)
12073cc4bb0SDavid Vrabel 			new = &((*new)->rb_left);
12173cc4bb0SDavid Vrabel 		else if (this->port > evtchn->port)
12273cc4bb0SDavid Vrabel 			new = &((*new)->rb_right);
123e3cc067bSJeremy Fitzhardinge 		else
12473cc4bb0SDavid Vrabel 			return -EEXIST;
12573cc4bb0SDavid Vrabel 	}
12673cc4bb0SDavid Vrabel 
12773cc4bb0SDavid Vrabel 	/* Add new node and rebalance tree. */
12873cc4bb0SDavid Vrabel 	rb_link_node(&evtchn->node, parent, new);
12973cc4bb0SDavid Vrabel 	rb_insert_color(&evtchn->node, &u->evtchns);
13073cc4bb0SDavid Vrabel 
13173cc4bb0SDavid Vrabel 	return 0;
13273cc4bb0SDavid Vrabel }
13373cc4bb0SDavid Vrabel 
13473cc4bb0SDavid Vrabel static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
13573cc4bb0SDavid Vrabel {
13686200154SDavid Vrabel 	u->nr_evtchns--;
13773cc4bb0SDavid Vrabel 	rb_erase(&evtchn->node, &u->evtchns);
13873cc4bb0SDavid Vrabel 	kfree(evtchn);
13973cc4bb0SDavid Vrabel }
14073cc4bb0SDavid Vrabel 
1410102e4efSYan Yankovskyi static struct user_evtchn *find_evtchn(struct per_user_data *u,
1420102e4efSYan Yankovskyi 				       evtchn_port_t port)
14373cc4bb0SDavid Vrabel {
14473cc4bb0SDavid Vrabel 	struct rb_node *node = u->evtchns.rb_node;
14573cc4bb0SDavid Vrabel 
14673cc4bb0SDavid Vrabel 	while (node) {
14773cc4bb0SDavid Vrabel 		struct user_evtchn *evtchn;
14873cc4bb0SDavid Vrabel 
1492f60b288SGeliang Tang 		evtchn = rb_entry(node, struct user_evtchn, node);
15073cc4bb0SDavid Vrabel 
15173cc4bb0SDavid Vrabel 		if (evtchn->port < port)
15273cc4bb0SDavid Vrabel 			node = node->rb_left;
15373cc4bb0SDavid Vrabel 		else if (evtchn->port > port)
15473cc4bb0SDavid Vrabel 			node = node->rb_right;
15573cc4bb0SDavid Vrabel 		else
15673cc4bb0SDavid Vrabel 			return evtchn;
15773cc4bb0SDavid Vrabel 	}
15873cc4bb0SDavid Vrabel 	return NULL;
159e3cc067bSJeremy Fitzhardinge }
160e3cc067bSJeremy Fitzhardinge 
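/*
 * Interrupt handler for every port bound through this device: queue the
 * port on the per-user notification ring (or flag an overflow if the ring
 * is full), leave the channel disabled until userspace re-enables it by
 * writing the port back, and wake up any waiting readers.
 */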
16170697d54SJeremy Fitzhardinge static irqreturn_t evtchn_interrupt(int irq, void *data)
162f7116284SIan Campbell {
16373cc4bb0SDavid Vrabel 	struct user_evtchn *evtchn = data;
16473cc4bb0SDavid Vrabel 	struct per_user_data *u = evtchn->user;
165f7116284SIan Campbell 
16673cc4bb0SDavid Vrabel 	WARN(!evtchn->enabled,
1670102e4efSYan Yankovskyi 	     "Interrupt for port %u, but apparently not enabled; per-user %p\n",
16873cc4bb0SDavid Vrabel 	     evtchn->port, u);
169f7116284SIan Campbell 
17073cc4bb0SDavid Vrabel 	evtchn->enabled = false;
17173cc4bb0SDavid Vrabel 
17273cc4bb0SDavid Vrabel 	spin_lock(&u->ring_prod_lock);
173f7116284SIan Campbell 
17486200154SDavid Vrabel 	if ((u->ring_prod - u->ring_cons) < u->ring_size) {
17586200154SDavid Vrabel 		*evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
176f7116284SIan Campbell 		wmb(); /* Ensure ring contents visible */
177f7116284SIan Campbell 		if (u->ring_cons == u->ring_prod++) {
178f7116284SIan Campbell 			wake_up_interruptible(&u->evtchn_wait);
179f7116284SIan Campbell 			kill_fasync(&u->evtchn_async_queue,
180f7116284SIan Campbell 				    SIGIO, POLL_IN);
181f7116284SIan Campbell 		}
182e3cc067bSJeremy Fitzhardinge 	} else
183f7116284SIan Campbell 		u->ring_overflow = 1;
184f7116284SIan Campbell 
18573cc4bb0SDavid Vrabel 	spin_unlock(&u->ring_prod_lock);
186f7116284SIan Campbell 
187f7116284SIan Campbell 	return IRQ_HANDLED;
188f7116284SIan Campbell }
189f7116284SIan Campbell 
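/*
 * read() hands userspace an array of pending evtchn_port_t values.  The
 * byte count is rounded down to a whole number of ports, the call blocks
 * until something is pending unless O_NONBLOCK is set, and it fails with
 * -EFBIG once the ring has overflowed (until IOCTL_EVTCHN_RESET clears
 * the error).
 */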
190f7116284SIan Campbell static ssize_t evtchn_read(struct file *file, char __user *buf,
191f7116284SIan Campbell 			   size_t count, loff_t *ppos)
192f7116284SIan Campbell {
193f7116284SIan Campbell 	int rc;
194f7116284SIan Campbell 	unsigned int c, p, bytes1 = 0, bytes2 = 0;
195f7116284SIan Campbell 	struct per_user_data *u = file->private_data;
196f7116284SIan Campbell 
197f7116284SIan Campbell 	/* Whole number of ports. */
198f7116284SIan Campbell 	count &= ~(sizeof(evtchn_port_t)-1);
199f7116284SIan Campbell 
200f7116284SIan Campbell 	if (count == 0)
201f7116284SIan Campbell 		return 0;
202f7116284SIan Campbell 
203f7116284SIan Campbell 	if (count > PAGE_SIZE)
204f7116284SIan Campbell 		count = PAGE_SIZE;
205f7116284SIan Campbell 
206f7116284SIan Campbell 	for (;;) {
207f7116284SIan Campbell 		mutex_lock(&u->ring_cons_mutex);
208f7116284SIan Campbell 
209f7116284SIan Campbell 		rc = -EFBIG;
210f7116284SIan Campbell 		if (u->ring_overflow)
211f7116284SIan Campbell 			goto unlock_out;
212f7116284SIan Campbell 
213f7116284SIan Campbell 		c = u->ring_cons;
214f7116284SIan Campbell 		p = u->ring_prod;
215f7116284SIan Campbell 		if (c != p)
216f7116284SIan Campbell 			break;
217f7116284SIan Campbell 
218f7116284SIan Campbell 		mutex_unlock(&u->ring_cons_mutex);
219f7116284SIan Campbell 
220f7116284SIan Campbell 		if (file->f_flags & O_NONBLOCK)
221f7116284SIan Campbell 			return -EAGAIN;
222f7116284SIan Campbell 
223f7116284SIan Campbell 		rc = wait_event_interruptible(u->evtchn_wait,
224f7116284SIan Campbell 					      u->ring_cons != u->ring_prod);
225f7116284SIan Campbell 		if (rc)
226f7116284SIan Campbell 			return rc;
227f7116284SIan Campbell 	}
228f7116284SIan Campbell 
229f7116284SIan Campbell 	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
23086200154SDavid Vrabel 	if (((c ^ p) & u->ring_size) != 0) {
23186200154SDavid Vrabel 		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
232f7116284SIan Campbell 			sizeof(evtchn_port_t);
23386200154SDavid Vrabel 		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
234f7116284SIan Campbell 	} else {
235f7116284SIan Campbell 		bytes1 = (p - c) * sizeof(evtchn_port_t);
236f7116284SIan Campbell 		bytes2 = 0;
237f7116284SIan Campbell 	}
238f7116284SIan Campbell 
239f7116284SIan Campbell 	/* Truncate chunks according to caller's maximum byte count. */
240f7116284SIan Campbell 	if (bytes1 > count) {
241f7116284SIan Campbell 		bytes1 = count;
242f7116284SIan Campbell 		bytes2 = 0;
243f7116284SIan Campbell 	} else if ((bytes1 + bytes2) > count) {
244f7116284SIan Campbell 		bytes2 = count - bytes1;
245f7116284SIan Campbell 	}
246f7116284SIan Campbell 
247f7116284SIan Campbell 	rc = -EFAULT;
248f7116284SIan Campbell 	rmb(); /* Ensure that we see the port before we copy it. */
24986200154SDavid Vrabel 	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
250f7116284SIan Campbell 	    ((bytes2 != 0) &&
251f7116284SIan Campbell 	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
252f7116284SIan Campbell 		goto unlock_out;
253f7116284SIan Campbell 
254f7116284SIan Campbell 	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
255f7116284SIan Campbell 	rc = bytes1 + bytes2;
256f7116284SIan Campbell 
257f7116284SIan Campbell  unlock_out:
258f7116284SIan Campbell 	mutex_unlock(&u->ring_cons_mutex);
259f7116284SIan Campbell 	return rc;
260f7116284SIan Campbell }
261f7116284SIan Campbell 
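/*
 * write() takes an array of evtchn_port_t values previously returned by
 * read() and re-enables event delivery on each port that is bound to this
 * user and currently disabled.
 */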
262f7116284SIan Campbell static ssize_t evtchn_write(struct file *file, const char __user *buf,
263f7116284SIan Campbell 			    size_t count, loff_t *ppos)
264f7116284SIan Campbell {
265f7116284SIan Campbell 	int rc, i;
266f7116284SIan Campbell 	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
267f7116284SIan Campbell 	struct per_user_data *u = file->private_data;
268f7116284SIan Campbell 
269f7116284SIan Campbell 	if (kbuf == NULL)
270f7116284SIan Campbell 		return -ENOMEM;
271f7116284SIan Campbell 
272f7116284SIan Campbell 	/* Whole number of ports. */
273f7116284SIan Campbell 	count &= ~(sizeof(evtchn_port_t)-1);
274f7116284SIan Campbell 
275f7116284SIan Campbell 	rc = 0;
276f7116284SIan Campbell 	if (count == 0)
277f7116284SIan Campbell 		goto out;
278f7116284SIan Campbell 
279f7116284SIan Campbell 	if (count > PAGE_SIZE)
280f7116284SIan Campbell 		count = PAGE_SIZE;
281f7116284SIan Campbell 
282f7116284SIan Campbell 	rc = -EFAULT;
283f7116284SIan Campbell 	if (copy_from_user(kbuf, buf, count) != 0)
284f7116284SIan Campbell 		goto out;
285f7116284SIan Campbell 
28673cc4bb0SDavid Vrabel 	mutex_lock(&u->bind_mutex);
287e3cc067bSJeremy Fitzhardinge 
288e3cc067bSJeremy Fitzhardinge 	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
2890102e4efSYan Yankovskyi 		evtchn_port_t port = kbuf[i];
29073cc4bb0SDavid Vrabel 		struct user_evtchn *evtchn;
291e3cc067bSJeremy Fitzhardinge 
29273cc4bb0SDavid Vrabel 		evtchn = find_evtchn(u, port);
29373cc4bb0SDavid Vrabel 		if (evtchn && !evtchn->enabled) {
29473cc4bb0SDavid Vrabel 			evtchn->enabled = true;
295*c44b849cSJuergen Gross 			xen_irq_lateeoi(irq_from_evtchn(port), 0);
296e3cc067bSJeremy Fitzhardinge 		}
297e3cc067bSJeremy Fitzhardinge 	}
298e3cc067bSJeremy Fitzhardinge 
29973cc4bb0SDavid Vrabel 	mutex_unlock(&u->bind_mutex);
300f7116284SIan Campbell 
301f7116284SIan Campbell 	rc = count;
302f7116284SIan Campbell 
303f7116284SIan Campbell  out:
304f7116284SIan Campbell 	free_page((unsigned long)kbuf);
305f7116284SIan Campbell 	return rc;
306f7116284SIan Campbell }
307f7116284SIan Campbell 
30886200154SDavid Vrabel static int evtchn_resize_ring(struct per_user_data *u)
30986200154SDavid Vrabel {
31086200154SDavid Vrabel 	unsigned int new_size;
31186200154SDavid Vrabel 	evtchn_port_t *new_ring, *old_ring;
31286200154SDavid Vrabel 
31386200154SDavid Vrabel 	/*
31486200154SDavid Vrabel 	 * Ensure the ring is large enough to capture all possible
31586200154SDavid Vrabel 	 * events, i.e. one free slot for each bound event.
31686200154SDavid Vrabel 	 */
31786200154SDavid Vrabel 	if (u->nr_evtchns <= u->ring_size)
31886200154SDavid Vrabel 		return 0;
31986200154SDavid Vrabel 
32086200154SDavid Vrabel 	if (u->ring_size == 0)
32186200154SDavid Vrabel 		new_size = 64;
32286200154SDavid Vrabel 	else
32386200154SDavid Vrabel 		new_size = 2 * u->ring_size;
32486200154SDavid Vrabel 
325344476e1SKees Cook 	new_ring = kvmalloc_array(new_size, sizeof(*new_ring), GFP_KERNEL);
32686200154SDavid Vrabel 	if (!new_ring)
32786200154SDavid Vrabel 		return -ENOMEM;
32886200154SDavid Vrabel 
32986200154SDavid Vrabel 	old_ring = u->ring;
33086200154SDavid Vrabel 
33186200154SDavid Vrabel 	/*
33286200154SDavid Vrabel 	 * Access to the ring contents is serialized by either the
33386200154SDavid Vrabel 	 * prod /or/ cons lock so take both when resizing.
33486200154SDavid Vrabel 	 */
33586200154SDavid Vrabel 	mutex_lock(&u->ring_cons_mutex);
33686200154SDavid Vrabel 	spin_lock_irq(&u->ring_prod_lock);
33786200154SDavid Vrabel 
33886200154SDavid Vrabel 	/*
33986200154SDavid Vrabel 	 * Copy the old ring contents to the new ring.
34086200154SDavid Vrabel 	 *
34127e0e638SJan Beulich 	 * To take care of wrapping, a full ring, and the new index
34227e0e638SJan Beulich 	 * pointing into the second half, simply copy the old contents
34327e0e638SJan Beulich 	 * twice.
34486200154SDavid Vrabel 	 *
34586200154SDavid Vrabel 	 * +---------+    +------------------+
34627e0e638SJan Beulich 	 * |34567  12| -> |34567  1234567  12|
34727e0e638SJan Beulich 	 * +-----p-c-+    +-------c------p---+
34886200154SDavid Vrabel 	 */
34927e0e638SJan Beulich 	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
35027e0e638SJan Beulich 	memcpy(new_ring + u->ring_size, old_ring,
35127e0e638SJan Beulich 	       u->ring_size * sizeof(*u->ring));
35286200154SDavid Vrabel 
35386200154SDavid Vrabel 	u->ring = new_ring;
35486200154SDavid Vrabel 	u->ring_size = new_size;
35586200154SDavid Vrabel 
35686200154SDavid Vrabel 	spin_unlock_irq(&u->ring_prod_lock);
35786200154SDavid Vrabel 	mutex_unlock(&u->ring_cons_mutex);
35886200154SDavid Vrabel 
35986200154SDavid Vrabel 	evtchn_free_ring(old_ring);
36086200154SDavid Vrabel 
36186200154SDavid Vrabel 	return 0;
36286200154SDavid Vrabel }
36386200154SDavid Vrabel 
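/*
 * Record a freshly bound port for this user: add it to the rbtree, grow
 * the notification ring if necessary and install evtchn_interrupt() as a
 * lateeoi handler.  If any step fails the event channel is closed again.
 */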
3640102e4efSYan Yankovskyi static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
365f7116284SIan Campbell {
36673cc4bb0SDavid Vrabel 	struct user_evtchn *evtchn;
36773cc4bb0SDavid Vrabel 	struct evtchn_close close;
368f7116284SIan Campbell 	int rc = 0;
369f7116284SIan Campbell 
3700a4666b5SJeremy Fitzhardinge 	/*
3710a4666b5SJeremy Fitzhardinge 	 * Ports are never reused, so every caller should pass in a
3720a4666b5SJeremy Fitzhardinge 	 * unique port.
3730a4666b5SJeremy Fitzhardinge 	 *
3740a4666b5SJeremy Fitzhardinge 	 * (Locking not necessary because we haven't registered the
3750a4666b5SJeremy Fitzhardinge 	 * interrupt handler yet, and our caller has already
3760a4666b5SJeremy Fitzhardinge 	 * serialized bind operations.)
3770a4666b5SJeremy Fitzhardinge 	 */
37873cc4bb0SDavid Vrabel 
37973cc4bb0SDavid Vrabel 	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
38073cc4bb0SDavid Vrabel 	if (!evtchn)
38173cc4bb0SDavid Vrabel 		return -ENOMEM;
38273cc4bb0SDavid Vrabel 
38373cc4bb0SDavid Vrabel 	evtchn->user = u;
38473cc4bb0SDavid Vrabel 	evtchn->port = port;
38573cc4bb0SDavid Vrabel 	evtchn->enabled = true; /* start enabled */
38673cc4bb0SDavid Vrabel 
38773cc4bb0SDavid Vrabel 	rc = add_evtchn(u, evtchn);
38873cc4bb0SDavid Vrabel 	if (rc < 0)
38973cc4bb0SDavid Vrabel 		goto err;
390f7116284SIan Campbell 
39186200154SDavid Vrabel 	rc = evtchn_resize_ring(u);
39286200154SDavid Vrabel 	if (rc < 0)
39386200154SDavid Vrabel 		goto err;
39486200154SDavid Vrabel 
395*c44b849cSJuergen Gross 	rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
39673cc4bb0SDavid Vrabel 					       u->name, evtchn);
39773cc4bb0SDavid Vrabel 	if (rc < 0)
39873cc4bb0SDavid Vrabel 		goto err;
39973cc4bb0SDavid Vrabel 
400420eb554SDaniel De Graaf 	rc = evtchn_make_refcounted(port);
40173cc4bb0SDavid Vrabel 	return rc;
40273cc4bb0SDavid Vrabel 
40373cc4bb0SDavid Vrabel err:
404e7e44e44SWei Liu 	/* bind failed, should close the port now */
405e7e44e44SWei Liu 	close.port = port;
406e7e44e44SWei Liu 	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
407e7e44e44SWei Liu 		BUG();
40873cc4bb0SDavid Vrabel 	del_evtchn(u, evtchn);
409f7116284SIan Campbell 	return rc;
410f7116284SIan Campbell }
411f7116284SIan Campbell 
41273cc4bb0SDavid Vrabel static void evtchn_unbind_from_user(struct per_user_data *u,
41373cc4bb0SDavid Vrabel 				    struct user_evtchn *evtchn)
414f7116284SIan Campbell {
41573cc4bb0SDavid Vrabel 	int irq = irq_from_evtchn(evtchn->port);
416f7116284SIan Campbell 
417e7e44e44SWei Liu 	BUG_ON(irq < 0);
418e7e44e44SWei Liu 
41973cc4bb0SDavid Vrabel 	unbind_from_irqhandler(irq, evtchn);
4200a4666b5SJeremy Fitzhardinge 
42173cc4bb0SDavid Vrabel 	del_evtchn(u, evtchn);
422f7116284SIan Campbell }
423f7116284SIan Campbell 
424c48f64abSAnoob Soman static DEFINE_PER_CPU(int, bind_last_selected_cpu);
425c48f64abSAnoob Soman 
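/*
 * Spread freshly bound interdomain event channels round-robin over the
 * online CPUs allowed by the IRQ's affinity mask.
 */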
4260102e4efSYan Yankovskyi static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
427c48f64abSAnoob Soman {
428c48f64abSAnoob Soman 	unsigned int selected_cpu, irq;
429c48f64abSAnoob Soman 	struct irq_desc *desc;
430c48f64abSAnoob Soman 	unsigned long flags;
431c48f64abSAnoob Soman 
432c48f64abSAnoob Soman 	irq = irq_from_evtchn(evtchn);
433c48f64abSAnoob Soman 	desc = irq_to_desc(irq);
434c48f64abSAnoob Soman 
435c48f64abSAnoob Soman 	if (!desc)
436c48f64abSAnoob Soman 		return;
437c48f64abSAnoob Soman 
438c48f64abSAnoob Soman 	raw_spin_lock_irqsave(&desc->lock, flags);
439c48f64abSAnoob Soman 	selected_cpu = this_cpu_read(bind_last_selected_cpu);
440c48f64abSAnoob Soman 	selected_cpu = cpumask_next_and(selected_cpu,
441c48f64abSAnoob Soman 			desc->irq_common_data.affinity, cpu_online_mask);
442c48f64abSAnoob Soman 
443c48f64abSAnoob Soman 	if (unlikely(selected_cpu >= nr_cpu_ids))
444c48f64abSAnoob Soman 		selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
445c48f64abSAnoob Soman 				cpu_online_mask);
446c48f64abSAnoob Soman 
447c48f64abSAnoob Soman 	this_cpu_write(bind_last_selected_cpu, selected_cpu);
448c48f64abSAnoob Soman 
449c48f64abSAnoob Soman 	/* unmask expects irqs to be disabled */
450bce5963bSJuergen Gross 	xen_set_affinity_evtchn(desc, selected_cpu);
451c48f64abSAnoob Soman 	raw_spin_unlock_irqrestore(&desc->lock, flags);
452c48f64abSAnoob Soman }
453c48f64abSAnoob Soman 
454f7116284SIan Campbell static long evtchn_ioctl(struct file *file,
455f7116284SIan Campbell 			 unsigned int cmd, unsigned long arg)
456f7116284SIan Campbell {
457f7116284SIan Campbell 	int rc;
458f7116284SIan Campbell 	struct per_user_data *u = file->private_data;
459f7116284SIan Campbell 	void __user *uarg = (void __user *) arg;
460f7116284SIan Campbell 
4610a4666b5SJeremy Fitzhardinge 	/* Prevent bind from racing with unbind */
4620a4666b5SJeremy Fitzhardinge 	mutex_lock(&u->bind_mutex);
4630a4666b5SJeremy Fitzhardinge 
464f7116284SIan Campbell 	switch (cmd) {
465f7116284SIan Campbell 	case IOCTL_EVTCHN_BIND_VIRQ: {
466f7116284SIan Campbell 		struct ioctl_evtchn_bind_virq bind;
467f7116284SIan Campbell 		struct evtchn_bind_virq bind_virq;
468f7116284SIan Campbell 
469fbc872c3SDavid Vrabel 		rc = -EACCES;
470fbc872c3SDavid Vrabel 		if (u->restrict_domid != UNRESTRICTED_DOMID)
471fbc872c3SDavid Vrabel 			break;
472fbc872c3SDavid Vrabel 
473f7116284SIan Campbell 		rc = -EFAULT;
474f7116284SIan Campbell 		if (copy_from_user(&bind, uarg, sizeof(bind)))
475f7116284SIan Campbell 			break;
476f7116284SIan Campbell 
477f7116284SIan Campbell 		bind_virq.virq = bind.virq;
478cbbb4682SVitaly Kuznetsov 		bind_virq.vcpu = xen_vcpu_nr(0);
479f7116284SIan Campbell 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
480f7116284SIan Campbell 						 &bind_virq);
481f7116284SIan Campbell 		if (rc != 0)
482f7116284SIan Campbell 			break;
483f7116284SIan Campbell 
484f7116284SIan Campbell 		rc = evtchn_bind_to_user(u, bind_virq.port);
485f7116284SIan Campbell 		if (rc == 0)
486f7116284SIan Campbell 			rc = bind_virq.port;
487f7116284SIan Campbell 		break;
488f7116284SIan Campbell 	}
489f7116284SIan Campbell 
490f7116284SIan Campbell 	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
491f7116284SIan Campbell 		struct ioctl_evtchn_bind_interdomain bind;
492f7116284SIan Campbell 		struct evtchn_bind_interdomain bind_interdomain;
493f7116284SIan Campbell 
494f7116284SIan Campbell 		rc = -EFAULT;
495f7116284SIan Campbell 		if (copy_from_user(&bind, uarg, sizeof(bind)))
496f7116284SIan Campbell 			break;
497f7116284SIan Campbell 
498fbc872c3SDavid Vrabel 		rc = -EACCES;
499fbc872c3SDavid Vrabel 		if (u->restrict_domid != UNRESTRICTED_DOMID &&
500fbc872c3SDavid Vrabel 		    u->restrict_domid != bind.remote_domain)
501fbc872c3SDavid Vrabel 			break;
502fbc872c3SDavid Vrabel 
503f7116284SIan Campbell 		bind_interdomain.remote_dom  = bind.remote_domain;
504f7116284SIan Campbell 		bind_interdomain.remote_port = bind.remote_port;
505f7116284SIan Campbell 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
506f7116284SIan Campbell 						 &bind_interdomain);
507f7116284SIan Campbell 		if (rc != 0)
508f7116284SIan Campbell 			break;
509f7116284SIan Campbell 
510f7116284SIan Campbell 		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
511c48f64abSAnoob Soman 		if (rc == 0) {
512f7116284SIan Campbell 			rc = bind_interdomain.local_port;
513c48f64abSAnoob Soman 			evtchn_bind_interdom_next_vcpu(rc);
514c48f64abSAnoob Soman 		}
515f7116284SIan Campbell 		break;
516f7116284SIan Campbell 	}
517f7116284SIan Campbell 
518f7116284SIan Campbell 	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
519f7116284SIan Campbell 		struct ioctl_evtchn_bind_unbound_port bind;
520f7116284SIan Campbell 		struct evtchn_alloc_unbound alloc_unbound;
521f7116284SIan Campbell 
522fbc872c3SDavid Vrabel 		rc = -EACCES;
523fbc872c3SDavid Vrabel 		if (u->restrict_domid != UNRESTRICTED_DOMID)
524fbc872c3SDavid Vrabel 			break;
525fbc872c3SDavid Vrabel 
526f7116284SIan Campbell 		rc = -EFAULT;
527f7116284SIan Campbell 		if (copy_from_user(&bind, uarg, sizeof(bind)))
528f7116284SIan Campbell 			break;
529f7116284SIan Campbell 
530f7116284SIan Campbell 		alloc_unbound.dom        = DOMID_SELF;
531f7116284SIan Campbell 		alloc_unbound.remote_dom = bind.remote_domain;
532f7116284SIan Campbell 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
533f7116284SIan Campbell 						 &alloc_unbound);
534f7116284SIan Campbell 		if (rc != 0)
535f7116284SIan Campbell 			break;
536f7116284SIan Campbell 
537f7116284SIan Campbell 		rc = evtchn_bind_to_user(u, alloc_unbound.port);
538f7116284SIan Campbell 		if (rc == 0)
539f7116284SIan Campbell 			rc = alloc_unbound.port;
540f7116284SIan Campbell 		break;
541f7116284SIan Campbell 	}
542f7116284SIan Campbell 
543f7116284SIan Campbell 	case IOCTL_EVTCHN_UNBIND: {
544f7116284SIan Campbell 		struct ioctl_evtchn_unbind unbind;
54573cc4bb0SDavid Vrabel 		struct user_evtchn *evtchn;
546f7116284SIan Campbell 
547f7116284SIan Campbell 		rc = -EFAULT;
548f7116284SIan Campbell 		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
549f7116284SIan Campbell 			break;
550f7116284SIan Campbell 
551f7116284SIan Campbell 		rc = -EINVAL;
5520dc0064aSDavid Vrabel 		if (unbind.port >= xen_evtchn_nr_channels())
553f7116284SIan Campbell 			break;
554f7116284SIan Campbell 
555f7116284SIan Campbell 		rc = -ENOTCONN;
55673cc4bb0SDavid Vrabel 		evtchn = find_evtchn(u, unbind.port);
55773cc4bb0SDavid Vrabel 		if (!evtchn)
558f7116284SIan Campbell 			break;
559f7116284SIan Campbell 
5603f5e554fSJeremy Fitzhardinge 		disable_irq(irq_from_evtchn(unbind.port));
56173cc4bb0SDavid Vrabel 		evtchn_unbind_from_user(u, evtchn);
562f7116284SIan Campbell 		rc = 0;
563f7116284SIan Campbell 		break;
564f7116284SIan Campbell 	}
565f7116284SIan Campbell 
566f7116284SIan Campbell 	case IOCTL_EVTCHN_NOTIFY: {
567f7116284SIan Campbell 		struct ioctl_evtchn_notify notify;
56873cc4bb0SDavid Vrabel 		struct user_evtchn *evtchn;
569f7116284SIan Campbell 
570f7116284SIan Campbell 		rc = -EFAULT;
571f7116284SIan Campbell 		if (copy_from_user(&notify, uarg, sizeof(notify)))
572f7116284SIan Campbell 			break;
573f7116284SIan Campbell 
574f7116284SIan Campbell 		rc = -ENOTCONN;
57573cc4bb0SDavid Vrabel 		evtchn = find_evtchn(u, notify.port);
57673cc4bb0SDavid Vrabel 		if (evtchn) {
577f7116284SIan Campbell 			notify_remote_via_evtchn(notify.port);
578f7116284SIan Campbell 			rc = 0;
579f7116284SIan Campbell 		}
580f7116284SIan Campbell 		break;
581f7116284SIan Campbell 	}
582f7116284SIan Campbell 
583f7116284SIan Campbell 	case IOCTL_EVTCHN_RESET: {
584f7116284SIan Campbell 		/* Initialise the ring to empty. Clear errors. */
585f7116284SIan Campbell 		mutex_lock(&u->ring_cons_mutex);
58673cc4bb0SDavid Vrabel 		spin_lock_irq(&u->ring_prod_lock);
587f7116284SIan Campbell 		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
58873cc4bb0SDavid Vrabel 		spin_unlock_irq(&u->ring_prod_lock);
589f7116284SIan Campbell 		mutex_unlock(&u->ring_cons_mutex);
590f7116284SIan Campbell 		rc = 0;
591f7116284SIan Campbell 		break;
592f7116284SIan Campbell 	}
593f7116284SIan Campbell 
594fbc872c3SDavid Vrabel 	case IOCTL_EVTCHN_RESTRICT_DOMID: {
595fbc872c3SDavid Vrabel 		struct ioctl_evtchn_restrict_domid ierd;
596fbc872c3SDavid Vrabel 
597fbc872c3SDavid Vrabel 		rc = -EACCES;
598fbc872c3SDavid Vrabel 		if (u->restrict_domid != UNRESTRICTED_DOMID)
599fbc872c3SDavid Vrabel 			break;
600fbc872c3SDavid Vrabel 
601fbc872c3SDavid Vrabel 		rc = -EFAULT;
602fbc872c3SDavid Vrabel 		if (copy_from_user(&ierd, uarg, sizeof(ierd)))
603fbc872c3SDavid Vrabel 			break;
604fbc872c3SDavid Vrabel 
605fbc872c3SDavid Vrabel 		rc = -EINVAL;
606fbc872c3SDavid Vrabel 		if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
607fbc872c3SDavid Vrabel 			break;
608fbc872c3SDavid Vrabel 
609fbc872c3SDavid Vrabel 		u->restrict_domid = ierd.domid;
610fbc872c3SDavid Vrabel 		rc = 0;
611fbc872c3SDavid Vrabel 
612fbc872c3SDavid Vrabel 		break;
613fbc872c3SDavid Vrabel 	}
614fbc872c3SDavid Vrabel 
615f7116284SIan Campbell 	default:
616f7116284SIan Campbell 		rc = -ENOSYS;
617f7116284SIan Campbell 		break;
618f7116284SIan Campbell 	}
6190a4666b5SJeremy Fitzhardinge 	mutex_unlock(&u->bind_mutex);
620f7116284SIan Campbell 
621f7116284SIan Campbell 	return rc;
622f7116284SIan Campbell }
623f7116284SIan Campbell 
624afc9a42bSAl Viro static __poll_t evtchn_poll(struct file *file, poll_table *wait)
625f7116284SIan Campbell {
626a9a08845SLinus Torvalds 	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
627f7116284SIan Campbell 	struct per_user_data *u = file->private_data;
628f7116284SIan Campbell 
629f7116284SIan Campbell 	poll_wait(file, &u->evtchn_wait, wait);
630f7116284SIan Campbell 	if (u->ring_cons != u->ring_prod)
631a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
632f7116284SIan Campbell 	if (u->ring_overflow)
633a9a08845SLinus Torvalds 		mask = EPOLLERR;
634f7116284SIan Campbell 	return mask;
635f7116284SIan Campbell }
636f7116284SIan Campbell 
637f7116284SIan Campbell static int evtchn_fasync(int fd, struct file *filp, int on)
638f7116284SIan Campbell {
639f7116284SIan Campbell 	struct per_user_data *u = filp->private_data;
640f7116284SIan Campbell 	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
641f7116284SIan Campbell }
642f7116284SIan Campbell 
643f7116284SIan Campbell static int evtchn_open(struct inode *inode, struct file *filp)
644f7116284SIan Campbell {
645f7116284SIan Campbell 	struct per_user_data *u;
646f7116284SIan Campbell 
647f7116284SIan Campbell 	u = kzalloc(sizeof(*u), GFP_KERNEL);
648f7116284SIan Campbell 	if (u == NULL)
649f7116284SIan Campbell 		return -ENOMEM;
650f7116284SIan Campbell 
651f7116284SIan Campbell 	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
652f7116284SIan Campbell 	if (u->name == NULL) {
653f7116284SIan Campbell 		kfree(u);
654f7116284SIan Campbell 		return -ENOMEM;
655f7116284SIan Campbell 	}
656f7116284SIan Campbell 
657f7116284SIan Campbell 	init_waitqueue_head(&u->evtchn_wait);
658f7116284SIan Campbell 
6590a4666b5SJeremy Fitzhardinge 	mutex_init(&u->bind_mutex);
660f7116284SIan Campbell 	mutex_init(&u->ring_cons_mutex);
66173cc4bb0SDavid Vrabel 	spin_lock_init(&u->ring_prod_lock);
662f7116284SIan Campbell 
663fbc872c3SDavid Vrabel 	u->restrict_domid = UNRESTRICTED_DOMID;
664fbc872c3SDavid Vrabel 
665f7116284SIan Campbell 	filp->private_data = u;
666f7116284SIan Campbell 
667c5bf68feSKirill Smelkov 	return stream_open(inode, filp);
668f7116284SIan Campbell }
669f7116284SIan Campbell 
670f7116284SIan Campbell static int evtchn_release(struct inode *inode, struct file *filp)
671f7116284SIan Campbell {
672f7116284SIan Campbell 	struct per_user_data *u = filp->private_data;
67373cc4bb0SDavid Vrabel 	struct rb_node *node;
674f7116284SIan Campbell 
67573cc4bb0SDavid Vrabel 	while ((node = u->evtchns.rb_node)) {
67673cc4bb0SDavid Vrabel 		struct user_evtchn *evtchn;
677f7116284SIan Campbell 
67873cc4bb0SDavid Vrabel 		evtchn = rb_entry(node, struct user_evtchn, node);
67973cc4bb0SDavid Vrabel 		disable_irq(irq_from_evtchn(evtchn->port));
68073cc4bb0SDavid Vrabel 		evtchn_unbind_from_user(u, evtchn);
6813f5e554fSJeremy Fitzhardinge 	}
6823f5e554fSJeremy Fitzhardinge 
68386200154SDavid Vrabel 	evtchn_free_ring(u->ring);
684f7116284SIan Campbell 	kfree(u->name);
685f7116284SIan Campbell 	kfree(u);
686f7116284SIan Campbell 
687f7116284SIan Campbell 	return 0;
688f7116284SIan Campbell }
689f7116284SIan Campbell 
690f7116284SIan Campbell static const struct file_operations evtchn_fops = {
691f7116284SIan Campbell 	.owner   = THIS_MODULE,
692f7116284SIan Campbell 	.read    = evtchn_read,
693f7116284SIan Campbell 	.write   = evtchn_write,
694f7116284SIan Campbell 	.unlocked_ioctl = evtchn_ioctl,
695f7116284SIan Campbell 	.poll    = evtchn_poll,
696f7116284SIan Campbell 	.fasync  = evtchn_fasync,
697f7116284SIan Campbell 	.open    = evtchn_open,
698f7116284SIan Campbell 	.release = evtchn_release,
699bc7fc5e3SJeremy Fitzhardinge 	.llseek	 = no_llseek,
700f7116284SIan Campbell };
701f7116284SIan Campbell 
702f7116284SIan Campbell static struct miscdevice evtchn_miscdev = {
703f7116284SIan Campbell 	.minor        = MISC_DYNAMIC_MINOR,
704376d908fSBastian Blank 	.name         = "xen/evtchn",
705f7116284SIan Campbell 	.fops         = &evtchn_fops,
706f7116284SIan Campbell };
707f7116284SIan Campbell static int __init evtchn_init(void)
708f7116284SIan Campbell {
709f7116284SIan Campbell 	int err;
710f7116284SIan Campbell 
711f7116284SIan Campbell 	if (!xen_domain())
712f7116284SIan Campbell 		return -ENODEV;
713f7116284SIan Campbell 
71418283ea7SWei Liu 	/* Create '/dev/xen/evtchn'. */
715f7116284SIan Campbell 	err = misc_register(&evtchn_miscdev);
716f7116284SIan Campbell 	if (err != 0) {
717283c0972SJoe Perches 		pr_err("Could not register /dev/xen/evtchn\n");
718f7116284SIan Campbell 		return err;
719f7116284SIan Campbell 	}
720f7116284SIan Campbell 
721283c0972SJoe Perches 	pr_info("Event-channel device installed\n");
722f7116284SIan Campbell 
723f7116284SIan Campbell 	return 0;
724f7116284SIan Campbell }
725f7116284SIan Campbell 
726f7116284SIan Campbell static void __exit evtchn_cleanup(void)
727f7116284SIan Campbell {
728f7116284SIan Campbell 	misc_deregister(&evtchn_miscdev);
729f7116284SIan Campbell }
730f7116284SIan Campbell 
731f7116284SIan Campbell module_init(evtchn_init);
732f7116284SIan Campbell module_exit(evtchn_cleanup);
733f7116284SIan Campbell 
734f7116284SIan Campbell MODULE_LICENSE("GPL");
735