/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>

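/*
 * Userspace protocol, as implemented by the read/write/ioctl handlers
 * below: a process opens /dev/xen/evtchn, binds one or more event
 * channels with the IOCTL_EVTCHN_BIND_* ioctls (each successful bind
 * returns the local port number), read()s pending ports as an array of
 * evtchn_port_t, and write()s the ports back to re-enable delivery once
 * they have been handled.  IOCTL_EVTCHN_NOTIFY signals the remote end
 * of a bound port, IOCTL_EVTCHN_UNBIND releases it, and
 * IOCTL_EVTCHN_RESET empties the notification ring.
 *
 * A minimal userspace bind sketch (illustrative only; it assumes the
 * exported uapi header that provides the IOCTL_EVTCHN_* definitions and
 * a hypothetical peer domain id "remote_domid"):
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_unbound_port bind = {
 *		.remote_domain = remote_domid,
 *	};
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *	if (port < 0)
 *		err(1, "bind");
 */
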
struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;
};

struct user_evtchn {
	struct rb_node node;
	struct per_user_data *user;
	unsigned port;
	bool enabled;
};

static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

	while (*new) {
		struct user_evtchn *this;

		this = container_of(*new, struct user_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &((*new)->rb_left);
		else if (this->port > evtchn->port)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, &u->evtchns);

	return 0;
}

static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	rb_erase(&evtchn->node, &u->evtchns);
	kfree(evtchn);
}

static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
{
	struct rb_node *node = u->evtchns.rb_node;

	while (node) {
		struct user_evtchn *evtchn;

		evtchn = container_of(node, struct user_evtchn, node);

		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}
	return NULL;
}

static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;

	WARN(!evtchn->enabled,
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	disable_irq_nosync(irq);
	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}

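/*
 * Ring protocol shared by evtchn_interrupt() above (producer, under
 * ring_prod_lock) and evtchn_read() below (consumer, under
 * ring_cons_mutex): ring_prod and ring_cons are free-running counters,
 * reduced to slot indices with EVTCHN_RING_MASK(), and the producer's
 * overflow check keeps (ring_prod - ring_cons) from ever exceeding
 * EVTCHN_RING_SIZE.  The wmb() after storing a port pairs with the
 * rmb() in the reader, so a port is visible in the ring before the
 * reader acts on the updated ring_prod.
 *
 * Worked example for the two-chunk copy in evtchn_read(), assuming
 * 4 KiB pages and 4-byte ports (EVTCHN_RING_SIZE == 1024): with
 * c == 1020 and p == 1030, (c ^ p) & EVTCHN_RING_SIZE is non-zero, so
 * the pending ports wrap around the ring; bytes1 covers slots
 * 1020..1023 (16 bytes) and bytes2 covers slots 0..5 (24 bytes),
 * i.e. all 10 pending ports.
 */
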
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
			sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}

static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		unsigned port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			enable_irq(irq_from_evtchn(port));
		}
	}

	mutex_unlock(&u->bind_mutex);

	rc = count;

out:
	free_page((unsigned long)kbuf);
	return rc;
}

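/*
 * A userspace consumer of the read()/write() pair above typically loops
 * as sketched here (illustrative only; "fd" is an already-opened and
 * bound /dev/xen/evtchn descriptor and handle_port() is hypothetical):
 *
 *	evtchn_port_t ports[8];
 *	ssize_t n = read(fd, ports, sizeof(ports));
 *	for (ssize_t i = 0; i < n / (ssize_t)sizeof(ports[0]); i++)
 *		handle_port(ports[i]);
 *	write(fd, ports, n);	 // re-enable delivery on those ports
 *
 * read() blocks until at least one port is pending unless O_NONBLOCK is
 * set; poll() reports POLLIN while the ring is non-empty and POLLERR
 * after an overflow, which only IOCTL_EVTCHN_RESET clears.
 */
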
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	struct user_evtchn *evtchn;
	struct evtchn_close close;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */

	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
				       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port);
	return rc;

err:
	/* bind failed, should close the port now */
	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
	del_evtchn(u, evtchn);
	return rc;
}

static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	BUG_ON(irq < 0);

	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}

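/*
 * All ioctls below run under bind_mutex, so binds, unbinds and notifies
 * are serialized against each other and against the re-enable loop in
 * evtchn_write().  IOCTL_EVTCHN_RESET additionally takes both ring
 * locks so the ring can be emptied without racing a concurrent reader
 * or the interrupt handler.
 */
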
static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom  = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= NR_EVENT_CHANNELS)
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, unbind.port);
		if (!evtchn)
			break;

		disable_irq(irq_from_evtchn(unbind.port));
		evtchn_unbind_from_user(u, evtchn);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, notify.port);
		if (evtchn) {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&u->ring_prod_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&u->ring_prod_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}

static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= POLLIN | POLLRDNORM;
	if (u->ring_overflow)
		mask = POLLERR;
	return mask;
}

static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;

	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	if (u->ring == NULL) {
		kfree(u->name);
		kfree(u);
		return -ENOMEM;
	}

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);
	spin_lock_init(&u->ring_prod_lock);

	filp->private_data = u;

	return nonseekable_open(inode, filp);
}

static int evtchn_release(struct inode *inode, struct file *filp)
{
	struct per_user_data *u = filp->private_data;
	struct rb_node *node;

	while ((node = u->evtchns.rb_node)) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);
		disable_irq(irq_from_evtchn(evtchn->port));
		evtchn_unbind_from_user(u, evtchn);
	}

	free_page((unsigned long)u->ring);
	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner          = THIS_MODULE,
	.read           = evtchn_read,
	.write          = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll           = evtchn_poll,
	.fasync         = evtchn_fasync,
	.open           = evtchn_open,
	.release        = evtchn_release,
	.llseek         = no_llseek,
};

static struct miscdevice evtchn_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "xen/evtchn",
	.fops  = &evtchn_fops,
};

static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		pr_err("Could not register /dev/xen/evtchn\n");
		return err;
	}

	pr_info("Event-channel device installed\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");