/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>

struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;
};

/*
 * Who's bound to each port?  This is logically an array of struct
 * per_user_data *, but we encode the current enabled-state in bit 0.
 */
static unsigned long *port_user;
static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
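
/*
 * Example of the bit-0 encoding above: once user u has bound port 5 and
 * delivery is enabled, port_user[5] == (unsigned long)u | 1.  The interrupt
 * handler clears bit 0 (and masks the IRQ) when it queues the port on the
 * notification ring; a write() of the port number from userspace sets the
 * bit again and re-enables the IRQ.  Stealing bit 0 is safe because the
 * per_user_data pointer returned by kzalloc() is at least word aligned.
 */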

static inline struct per_user_data *get_port_user(unsigned port)
{
	return (struct per_user_data *)(port_user[port] & ~1);
}

static inline void set_port_user(unsigned port, struct per_user_data *u)
{
	port_user[port] = (unsigned long)u;
}

static inline bool get_port_enabled(unsigned port)
{
	return port_user[port] & 1;
}

static inline void set_port_enabled(unsigned port, bool enabled)
{
	if (enabled)
		port_user[port] |= 1;
	else
		port_user[port] &= ~1;
}

static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	unsigned int port = (unsigned long)data;
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	u = get_port_user(port);

	WARN(!get_port_enabled(port),
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     port, u);

	disable_irq_nosync(irq);
	set_port_enabled(port, false);

	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&port_user_lock);

	return IRQ_HANDLED;
}
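
/*
 * Ring protocol, as implemented by evtchn_interrupt() above and
 * evtchn_read() below: the interrupt handler is the only producer and
 * advances ring_prod under port_user_lock; readers are the only consumers
 * and advance ring_cons under ring_cons_mutex.  The wmb() after storing a
 * port pairs with the rmb() in evtchn_read(), so a reader that observes
 * the new ring_prod also sees the queued port value.  If the ring ever
 * fills up, ring_overflow is latched and read() fails with -EFBIG until
 * the user issues IOCTL_EVTCHN_RESET.
 */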

static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks.  Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
			sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}

static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	spin_lock_irq(&port_user_lock);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		unsigned port = kbuf[i];

		if (port < NR_EVENT_CHANNELS &&
		    get_port_user(port) == u &&
		    !get_port_enabled(port)) {
			set_port_enabled(port, true);
			enable_irq(irq_from_evtchn(port));
		}
	}

	spin_unlock_irq(&port_user_lock);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}

static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */
	BUG_ON(get_port_user(port) != NULL);
	set_port_user(port, u);
	set_port_enabled(port, true); /* start enabled */

	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
				       u->name, (void *)(unsigned long)port);
	if (rc >= 0)
		rc = 0;

	return rc;
}

static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
	int irq = irq_from_evtchn(port);

	unbind_from_irqhandler(irq, (void *)(unsigned long)port);

	set_port_user(port, NULL);
}
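
/*
 * The ioctl handler below is the interface /dev/xen/evtchn exposes to
 * userspace.  A minimal, illustrative (untested) userspace sketch, assuming
 * the ioctl numbers and struct layouts from the public <xen/evtchn.h>
 * header and with error handling omitted:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <xen/evtchn.h>
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *
 *	// Allocate an unbound port that dom0 may connect to.
 *	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *
 *	evtchn_port_t pending;
 *	read(fd, &pending, sizeof(pending));   // blocks until an event fires
 *	write(fd, &pending, sizeof(pending));  // re-enable delivery on the port
 *
 *	// Kick the remote end of the channel.
 *	struct ioctl_evtchn_notify notify = { .port = port };
 *	ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);
 */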

static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom  = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= NR_EVENT_CHANNELS)
			break;

		spin_lock_irq(&port_user_lock);

		rc = -ENOTCONN;
		if (get_port_user(unbind.port) != u) {
			spin_unlock_irq(&port_user_lock);
			break;
		}

		disable_irq(irq_from_evtchn(unbind.port));

		spin_unlock_irq(&port_user_lock);

		evtchn_unbind_from_user(u, unbind.port);

		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		if (notify.port >= NR_EVENT_CHANNELS) {
			rc = -EINVAL;
		} else if (get_port_user(notify.port) != u) {
			rc = -ENOTCONN;
		} else {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&port_user_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&port_user_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}
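
/*
 * poll() semantics: the fd is always writable (write() never blocks, it
 * only re-enables ports), becomes readable when the notification ring is
 * non-empty, and reports only POLLERR once the ring has overflowed; the
 * overflow state persists until the user issues IOCTL_EVTCHN_RESET.
 */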

static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= POLLIN | POLLRDNORM;
	if (u->ring_overflow)
		mask = POLLERR;
	return mask;
}

static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;
	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	if (u->ring == NULL) {
		kfree(u->name);
		kfree(u);
		return -ENOMEM;
	}

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);

	filp->private_data = u;

	return nonseekable_open(inode, filp);
}

static int evtchn_release(struct inode *inode, struct file *filp)
{
	int i;
	struct per_user_data *u = filp->private_data;

	spin_lock_irq(&port_user_lock);

	free_page((unsigned long)u->ring);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (get_port_user(i) != u)
			continue;

		disable_irq(irq_from_evtchn(i));
	}

	spin_unlock_irq(&port_user_lock);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (get_port_user(i) != u)
			continue;

		evtchn_unbind_from_user(get_port_user(i), i);
	}

	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner   = THIS_MODULE,
	.read    = evtchn_read,
	.write   = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll    = evtchn_poll,
	.fasync  = evtchn_fasync,
	.open    = evtchn_open,
	.release = evtchn_release,
	.llseek  = no_llseek,
};

static struct miscdevice evtchn_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "xen/evtchn",
	.fops  = &evtchn_fops,
};

static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL);
	if (port_user == NULL)
		return -ENOMEM;

	spin_lock_init(&port_user_lock);

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		printk(KERN_ALERT "Could not register /dev/xen/evtchn\n");
		kfree(port_user);
		port_user = NULL;
		return err;
	}

	printk(KERN_INFO "Event-channel device installed.\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	/* Unregister the device before freeing the port table it uses. */
	misc_deregister(&evtchn_miscdev);

	kfree(port_user);
	port_user = NULL;
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");