evtchn.c (97a2847d064e2fdd2e3cd4ff14cad2f377f0677a) | evtchn.c (73cc4bb0c79eebe1f0e92b700d9fe8d1c9b061bb) |
---|---|
1/****************************************************************************** 2 * evtchn.c 3 * 4 * Driver for receiving and demuxing event-channel signals. 5 * 6 * Copyright (c) 2004-2005, K A Fraser 7 * Multi-process extensions Copyright (c) 2004, Steven Smith 8 * --- 43 unchanged lines hidden (view full) --- 52 53#include <xen/xen.h> 54#include <xen/events.h> 55#include <xen/evtchn.h> 56#include <asm/xen/hypervisor.h> 57 58struct per_user_data { 59 struct mutex bind_mutex; /* serialize bind/unbind operations */ | 1/****************************************************************************** 2 * evtchn.c 3 * 4 * Driver for receiving and demuxing event-channel signals. 5 * 6 * Copyright (c) 2004-2005, K A Fraser 7 * Multi-process extensions Copyright (c) 2004, Steven Smith 8 * --- 43 unchanged lines hidden (view full) --- 52 53#include <xen/xen.h> 54#include <xen/events.h> 55#include <xen/evtchn.h> 56#include <asm/xen/hypervisor.h> 57 58struct per_user_data { 59 struct mutex bind_mutex; /* serialize bind/unbind operations */ |
60 struct rb_root evtchns; |
|
60 61 /* Notification ring, accessed via /dev/xen/evtchn. */ 62#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) 63#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) 64 evtchn_port_t *ring; 65 unsigned int ring_cons, ring_prod, ring_overflow; 66 struct mutex ring_cons_mutex; /* protect against concurrent readers */ | 61 62 /* Notification ring, accessed via /dev/xen/evtchn. */ 63#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) 64#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) 65 evtchn_port_t *ring; 66 unsigned int ring_cons, ring_prod, ring_overflow; 67 struct mutex ring_cons_mutex; /* protect against concurrent readers */ |
 68 spinlock_t ring_prod_lock; /* protect against concurrent interrupts */ |
|
67 68 /* Processes wait on this queue when ring is empty. */ 69 wait_queue_head_t evtchn_wait; 70 struct fasync_struct *evtchn_async_queue; 71 const char *name; 72}; 73 | 69 70 /* Processes wait on this queue when ring is empty. */ 71 wait_queue_head_t evtchn_wait; 72 struct fasync_struct *evtchn_async_queue; 73 const char *name; 74}; 75 |
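The notification ring declared above uses a classic free-running-counter idiom: EVTCHN_RING_SIZE is a power of two, ring_prod and ring_cons only ever increase, and indices are masked only at the point of dereference. Because the counters are unsigned, ring_prod - ring_cons yields the element count even after wraparound, so no separate "full" flag is needed. A minimal standalone sketch of the same technique (demo_ring and demo_push are illustrative names, not part of the driver):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_RING_SIZE 1024u			/* must be a power of two */
#define DEMO_RING_MASK(i) ((i) & (DEMO_RING_SIZE - 1))

struct demo_ring {
	uint32_t slots[DEMO_RING_SIZE];
	unsigned int prod, cons;		/* free-running, never masked */
};

static bool demo_push(struct demo_ring *r, uint32_t v)
{
	/* Modular unsigned subtraction gives the element count even
	 * after prod and cons wrap around UINT_MAX. */
	if (r->prod - r->cons >= DEMO_RING_SIZE)
		return false;			/* full; the driver sets ring_overflow instead */
	r->slots[DEMO_RING_MASK(r->prod)] = v;
	r->prod++;				/* the driver issues wmb() before this publish */
	return true;
}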
74/* 75 * Who's bound to each port? This is logically an array of struct 76 * per_user_data *, but we encode the current enabled-state in bit 0. 77 */ 78static unsigned long *port_user; 79static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ | 76struct user_evtchn { 77 struct rb_node node; 78 struct per_user_data *user; 79 unsigned port; 80 bool enabled; 81}; |
80 | 82 |
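This hunk is the heart of the change. On the left, port ownership lives in one global port_user[] table, with the per-port enabled flag folded into bit 0 of the owner pointer (legal because kmalloc'd structs are at least word-aligned, so bit 0 of a valid pointer is always zero). On the right, each bound port gets an explicit user_evtchn node, keyed by port and linked into the per-user rb_root added to per_user_data above. For reference, the old encoding on the left boils down to:

/* Old scheme (left column only): enabled-state packed into pointer bit 0. */
unsigned long entry = (unsigned long)u | (enabled ? 1UL : 0UL);
struct per_user_data *owner = (struct per_user_data *)(entry & ~1UL);
bool is_enabled = entry & 1;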
81static inline struct per_user_data *get_port_user(unsigned port) | 83static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) |
82{ | 84{ |
83 return (struct per_user_data *)(port_user[port] & ~1); 84} | 85 struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; |
85 | 86 |
86static inline void set_port_user(unsigned port, struct per_user_data *u) 87{ 88 port_user[port] = (unsigned long)u; | 87 while (*new) { 88 struct user_evtchn *this; 89 90 this = container_of(*new, struct user_evtchn, node); 91 92 parent = *new; 93 if (this->port < evtchn->port) 94 new = &((*new)->rb_left); 95 else if (this->port > evtchn->port) 96 new = &((*new)->rb_right); 97 else 98 return -EEXIST; 99 } 100 101 /* Add new node and rebalance tree. */ 102 rb_link_node(&evtchn->node, parent, new); 103 rb_insert_color(&evtchn->node, &u->evtchns); 104 105 return 0; |
89} 90 | 106} 107 |
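Note the comparison direction in add_evtchn(): it descends left when this->port < evtchn->port, so larger ports end up in left subtrees. That is the mirror image of the textbook orientation, but an rbtree only requires that insert and lookup agree on one total order, and find_evtchn() below walks with the same inverted test:

/* add_evtchn():  go left when this->port  < evtchn->port
 * find_evtchn(): go left when evtchn->port < port
 * Both place larger ports to the left; flipping the comparison in only
 * one of the two functions would silently break lookups. */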
91static inline bool get_port_enabled(unsigned port) | 108static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) |
92{ | 109{ |
93 return port_user[port] & 1; | 110 rb_erase(&evtchn->node, &u->evtchns); 111 kfree(evtchn); |
94} 95 | 112} 113 |
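del_evtchn() both unlinks the node and frees it, so callers must not kfree() the evtchn again afterwards (see the err path in evtchn_bind_to_user() below). It also assumes the node is currently linked into u->evtchns; calling it on a node that add_evtchn() rejected would rb_erase() an unlinked node.

/* Correct teardown is a single call: */
del_evtchn(u, evtchn);		/* rb_erase() + kfree() */
/* kfree(evtchn);		   <-- would be a double free */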
96static inline void set_port_enabled(unsigned port, bool enabled) | 114static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port) |
97{ | 115{ |
98 if (enabled) 99 port_user[port] |= 1; 100 else 101 port_user[port] &= ~1; | 116 struct rb_node *node = u->evtchns.rb_node; 117 118 while (node) { 119 struct user_evtchn *evtchn; 120 121 evtchn = container_of(node, struct user_evtchn, node); 122 123 if (evtchn->port < port) 124 node = node->rb_left; 125 else if (evtchn->port > port) 126 node = node->rb_right; 127 else 128 return evtchn; 129 } 130 return NULL; |
102} 103 104static irqreturn_t evtchn_interrupt(int irq, void *data) 105{ | 131} 132 133static irqreturn_t evtchn_interrupt(int irq, void *data) 134{ |
106 unsigned int port = (unsigned long)data; 107 struct per_user_data *u; | 135 struct user_evtchn *evtchn = data; 136 struct per_user_data *u = evtchn->user; |
108 | 137 |
109 spin_lock(&port_user_lock); 110 111 u = get_port_user(port); 112 113 WARN(!get_port_enabled(port), | 138 WARN(!evtchn->enabled, |
114 "Interrupt for port %d, but apparently not enabled; per-user %p\n", | 139 "Interrupt for port %d, but apparently not enabled; per-user %p\n", |
115 port, u); | 140 evtchn->port, u); |
116 117 disable_irq_nosync(irq); | 141 142 disable_irq_nosync(irq); |
118 set_port_enabled(port, false); | 143 evtchn->enabled = false; |
119 | 144 |
145 spin_lock(&u->ring_prod_lock); 146 |
|
120 if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { | 147 if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { |
121 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; | 148 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port; |
122 wmb(); /* Ensure ring contents visible */ 123 if (u->ring_cons == u->ring_prod++) { 124 wake_up_interruptible(&u->evtchn_wait); 125 kill_fasync(&u->evtchn_async_queue, 126 SIGIO, POLL_IN); 127 } 128 } else 129 u->ring_overflow = 1; 130 | 149 wmb(); /* Ensure ring contents visible */ 150 if (u->ring_cons == u->ring_prod++) { 151 wake_up_interruptible(&u->evtchn_wait); 152 kill_fasync(&u->evtchn_async_queue, 153 SIGIO, POLL_IN); 154 } 155 } else 156 u->ring_overflow = 1; 157 |
131 spin_unlock(&port_user_lock); | 158 spin_unlock(&u->ring_prod_lock); |
132 133 return IRQ_HANDLED; 134} 135 136static ssize_t evtchn_read(struct file *file, char __user *buf, 137 size_t count, loff_t *ppos) 138{ 139 int rc; --- 84 unchanged lines hidden (view full) --- 224 225 if (count > PAGE_SIZE) 226 count = PAGE_SIZE; 227 228 rc = -EFAULT; 229 if (copy_from_user(kbuf, buf, count) != 0) 230 goto out; 231 | 159 160 return IRQ_HANDLED; 161} 162 163static ssize_t evtchn_read(struct file *file, char __user *buf, 164 size_t count, loff_t *ppos) 165{ 166 int rc; --- 84 unchanged lines hidden (view full) --- 251 252 if (count > PAGE_SIZE) 253 count = PAGE_SIZE; 254 255 rc = -EFAULT; 256 if (copy_from_user(kbuf, buf, count) != 0) 257 goto out; 258 |
232 spin_lock_irq(&port_user_lock); | 259 mutex_lock(&u->bind_mutex); |
233 234 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) { 235 unsigned port = kbuf[i]; | 260 261 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) { 262 unsigned port = kbuf[i]; |
263 struct user_evtchn *evtchn; |
|
236 | 264 |
237 if (port < NR_EVENT_CHANNELS && 238 get_port_user(port) == u && 239 !get_port_enabled(port)) { 240 set_port_enabled(port, true); | 265 evtchn = find_evtchn(u, port); 266 if (evtchn && !evtchn->enabled) { 267 evtchn->enabled = true; |
241 enable_irq(irq_from_evtchn(port)); 242 } 243 } 244 | 268 enable_irq(irq_from_evtchn(port)); 269 } 270 } 271 |
245 spin_unlock_irq(&port_user_lock); | 272 mutex_unlock(&u->bind_mutex); |
246 247 rc = count; 248 249 out: 250 free_page((unsigned long)kbuf); 251 return rc; 252} 253 254static int evtchn_bind_to_user(struct per_user_data *u, int port) 255{ | 273 274 rc = count; 275 276 out: 277 free_page((unsigned long)kbuf); 278 return rc; 279} 280 281static int evtchn_bind_to_user(struct per_user_data *u, int port) 282{ |
283 struct user_evtchn *evtchn; 284 struct evtchn_close close; |
|
256 int rc = 0; 257 258 /* 259 * Ports are never reused, so every caller should pass in a 260 * unique port. 261 * 262 * (Locking not necessary because we haven't registered the 263 * interrupt handler yet, and our caller has already 264 * serialized bind operations.) 265 */ | 285 int rc = 0; 286 287 /* 288 * Ports are never reused, so every caller should pass in a 289 * unique port. 290 * 291 * (Locking not necessary because we haven't registered the 292 * interrupt handler yet, and our caller has already 293 * serialized bind operations.) 294 */ |
266 BUG_ON(get_port_user(port) != NULL); 267 set_port_user(port, u); 268 set_port_enabled(port, true); /* start enabled */ | |
269 | 295 |
296 evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL); 297 if (!evtchn) 298 return -ENOMEM; 299 300 evtchn->user = u; 301 evtchn->port = port; 302 evtchn->enabled = true; /* start enabled */ 303 304 rc = add_evtchn(u, evtchn); 305 if (rc < 0) 306 goto err; 307 |
|
270 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, | 308 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, |
271 u->name, (void *)(unsigned long)port); 272 if (rc >= 0) 273 rc = evtchn_make_refcounted(port); 274 else { 275 /* bind failed, should close the port now */ 276 struct evtchn_close close; 277 close.port = port; 278 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 279 BUG(); 280 set_port_user(port, NULL); 281 } | 309 u->name, evtchn); 310 if (rc < 0) 311 goto err; |
282 | 312 |
313 rc = evtchn_make_refcounted(port); |
|
283 return rc; | 314 return rc; |
 315 316err: 317 /* bind failed, should close the port now */ 318 close.port = port; 319 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 320 BUG(); 321 /* del_evtchn() already frees evtchn; a second kfree() here would double-free */ 322 del_evtchn(u, evtchn); 323 return rc; |
|
284} 285 | 324} 325 |
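For context, the path just bound is driven from userspace roughly as follows. This is a minimal sketch against the /dev/xen/evtchn ioctl ABI (struct and ioctl names are from the kernel's uapi evtchn header; the installed header path can vary by distribution, error handling is omitted, and remote_domain = 0 is purely illustrative):

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/evtchn.h>		/* IOCTL_EVTCHN_* and argument structs */

int demo(void)
{
	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
	uint32_t port;		/* evtchn_port_t */
	int fd = open("/dev/xen/evtchn", O_RDWR);

	/* Lands in evtchn_bind_to_user(); on success the ioctl returns the port. */
	port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);

	/* evtchn_read() blocks until evtchn_interrupt() queues the port. */
	read(fd, &port, sizeof(port));

	/* The handler disabled the port's IRQ; writing the port back runs
	 * the enable loop in evtchn_write() and re-enables it. */
	write(fd, &port, sizeof(port));

	close(fd);
	return 0;
}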
286static void evtchn_unbind_from_user(struct per_user_data *u, int port) | 326static void evtchn_unbind_from_user(struct per_user_data *u, 327 struct user_evtchn *evtchn) |
287{ | 328{ |
288 int irq = irq_from_evtchn(port); | 329 int irq = irq_from_evtchn(evtchn->port); |
289 290 BUG_ON(irq < 0); 291 | 330 331 BUG_ON(irq < 0); 332 |
292 unbind_from_irqhandler(irq, (void *)(unsigned long)port); | 333 unbind_from_irqhandler(irq, evtchn); |
293 | 334 |
294 set_port_user(port, NULL); | 335 del_evtchn(u, evtchn); |
295} 296 297static long evtchn_ioctl(struct file *file, 298 unsigned int cmd, unsigned long arg) 299{ 300 int rc; 301 struct per_user_data *u = file->private_data; 302 void __user *uarg = (void __user *) arg; --- 62 unchanged lines hidden (view full) --- 365 rc = evtchn_bind_to_user(u, alloc_unbound.port); 366 if (rc == 0) 367 rc = alloc_unbound.port; 368 break; 369 } 370 371 case IOCTL_EVTCHN_UNBIND: { 372 struct ioctl_evtchn_unbind unbind; | 336} 337 338static long evtchn_ioctl(struct file *file, 339 unsigned int cmd, unsigned long arg) 340{ 341 int rc; 342 struct per_user_data *u = file->private_data; 343 void __user *uarg = (void __user *) arg; --- 62 unchanged lines hidden (view full) --- 406 rc = evtchn_bind_to_user(u, alloc_unbound.port); 407 if (rc == 0) 408 rc = alloc_unbound.port; 409 break; 410 } 411 412 case IOCTL_EVTCHN_UNBIND: { 413 struct ioctl_evtchn_unbind unbind; |
414 struct user_evtchn *evtchn; |
|
373 374 rc = -EFAULT; 375 if (copy_from_user(&unbind, uarg, sizeof(unbind))) 376 break; 377 378 rc = -EINVAL; 379 if (unbind.port >= NR_EVENT_CHANNELS) 380 break; 381 382 rc = -ENOTCONN; | 415 416 rc = -EFAULT; 417 if (copy_from_user(&unbind, uarg, sizeof(unbind))) 418 break; 419 420 rc = -EINVAL; 421 if (unbind.port >= NR_EVENT_CHANNELS) 422 break; 423 424 rc = -ENOTCONN; |
383 if (get_port_user(unbind.port) != u) | 425 evtchn = find_evtchn(u, unbind.port); 426 if (!evtchn) |
384 break; 385 386 disable_irq(irq_from_evtchn(unbind.port)); | 427 break; 428 429 disable_irq(irq_from_evtchn(unbind.port)); |
387 388 evtchn_unbind_from_user(u, unbind.port); 389 | 430 evtchn_unbind_from_user(u, evtchn); |
390 rc = 0; 391 break; 392 } 393 394 case IOCTL_EVTCHN_NOTIFY: { 395 struct ioctl_evtchn_notify notify; | 431 rc = 0; 432 break; 433 } 434 435 case IOCTL_EVTCHN_NOTIFY: { 436 struct ioctl_evtchn_notify notify; |
437 struct user_evtchn *evtchn; |
|
396 397 rc = -EFAULT; 398 if (copy_from_user(¬ify, uarg, sizeof(notify))) 399 break; 400 | 438 439 rc = -EFAULT; 440 if (copy_from_user(¬ify, uarg, sizeof(notify))) 441 break; 442 |
401 if (notify.port >= NR_EVENT_CHANNELS) { 402 rc = -EINVAL; 403 } else if (get_port_user(notify.port) != u) { 404 rc = -ENOTCONN; 405 } else { | 443 rc = -ENOTCONN; 444 evtchn = find_evtchn(u, notify.port); 445 if (evtchn) { |
406 notify_remote_via_evtchn(notify.port); 407 rc = 0; 408 } 409 break; 410 } 411 412 case IOCTL_EVTCHN_RESET: { 413 /* Initialise the ring to empty. Clear errors. */ 414 mutex_lock(&u->ring_cons_mutex); | 446 notify_remote_via_evtchn(notify.port); 447 rc = 0; 448 } 449 break; 450 } 451 452 case IOCTL_EVTCHN_RESET: { 453 /* Initialise the ring to empty. Clear errors. */ 454 mutex_lock(&u->ring_cons_mutex); |
415 spin_lock_irq(&port_user_lock); | 455 spin_lock_irq(&u->ring_prod_lock); |
416 u->ring_cons = u->ring_prod = u->ring_overflow = 0; | 456 u->ring_cons = u->ring_prod = u->ring_overflow = 0; |
417 spin_unlock_irq(&port_user_lock); | 457 spin_unlock_irq(&u->ring_prod_lock); |
418 mutex_unlock(&u->ring_cons_mutex); 419 rc = 0; 420 break; 421 } 422 423 default: 424 rc = -ENOSYS; 425 break; --- 42 unchanged lines hidden (view full) --- 468 if (u->ring == NULL) { 469 kfree(u->name); 470 kfree(u); 471 return -ENOMEM; 472 } 473 474 mutex_init(&u->bind_mutex); 475 mutex_init(&u->ring_cons_mutex); | 458 mutex_unlock(&u->ring_cons_mutex); 459 rc = 0; 460 break; 461 } 462 463 default: 464 rc = -ENOSYS; 465 break; --- 42 unchanged lines hidden (view full) --- 508 if (u->ring == NULL) { 509 kfree(u->name); 510 kfree(u); 511 return -ENOMEM; 512 } 513 514 mutex_init(&u->bind_mutex); 515 mutex_init(&u->ring_cons_mutex); |
516 spin_lock_init(&u->ring_prod_lock); |
|
476 477 filp->private_data = u; 478 479 return nonseekable_open(inode, filp); 480} 481 482static int evtchn_release(struct inode *inode, struct file *filp) 483{ | 517 518 filp->private_data = u; 519 520 return nonseekable_open(inode, filp); 521} 522 523static int evtchn_release(struct inode *inode, struct file *filp) 524{ |
484 int i; | |
485 struct per_user_data *u = filp->private_data; | 525 struct per_user_data *u = filp->private_data; |
526 struct rb_node *node; |
|
486 | 527 |
487 for (i = 0; i < NR_EVENT_CHANNELS; i++) { 488 if (get_port_user(i) != u) 489 continue; | 528 while ((node = u->evtchns.rb_node)) { 529 struct user_evtchn *evtchn; |
490 | 530 |
491 disable_irq(irq_from_evtchn(i)); 492 evtchn_unbind_from_user(get_port_user(i), i); | 531 evtchn = rb_entry(node, struct user_evtchn, node); 532 disable_irq(irq_from_evtchn(evtchn->port)); 533 evtchn_unbind_from_user(u, evtchn); |
493 } 494 495 free_page((unsigned long)u->ring); 496 kfree(u->name); 497 kfree(u); 498 499 return 0; 500} --- 17 unchanged lines hidden (view full) --- 518}; 519static int __init evtchn_init(void) 520{ 521 int err; 522 523 if (!xen_domain()) 524 return -ENODEV; 525 | 534 } 535 536 free_page((unsigned long)u->ring); 537 kfree(u->name); 538 kfree(u); 539 540 return 0; 541} --- 17 unchanged lines hidden (view full) --- 559}; 560static int __init evtchn_init(void) 561{ 562 int err; 563 564 if (!xen_domain()) 565 return -ENODEV; 566 |
526 port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL); 527 if (port_user == NULL) 528 return -ENOMEM; 529 530 spin_lock_init(&port_user_lock); 531 | |
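With per-port nodes allocated at bind time, the module no longer needs this up-front table (or the matching kfree() in evtchn_cleanup() below). Rough numbers, assuming the 2-level event-channel ABI on 64-bit (an assumption, not stated in the diff):

/* Dropped:  NR_EVENT_CHANNELS * sizeof(unsigned long)
 *         = 4096 * 8 bytes = 32 KiB, allocated even with no users.
 * Added:   one ~40-byte user_evtchn per bound port, on demand. */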
532 /* Create '/dev/xen/evtchn'. */ 533 err = misc_register(&evtchn_miscdev); 534 if (err != 0) { 535 pr_err("Could not register /dev/xen/evtchn\n"); 536 return err; 537 } 538 539 pr_info("Event-channel device installed\n"); 540 541 return 0; 542} 543 544static void __exit evtchn_cleanup(void) 545{ | 567 /* Create '/dev/xen/evtchn'. */ 568 err = misc_register(&evtchn_miscdev); 569 if (err != 0) { 570 pr_err("Could not register /dev/xen/evtchn\n"); 571 return err; 572 } 573 574 pr_info("Event-channel device installed\n"); 575 576 return 0; 577} 578 579static void __exit evtchn_cleanup(void) 580{ |
546 kfree(port_user); 547 port_user = NULL; 548 | |
549 misc_deregister(&evtchn_miscdev); 550} 551 552module_init(evtchn_init); 553module_exit(evtchn_cleanup); 554 555MODULE_LICENSE("GPL"); | 581 misc_deregister(&evtchn_miscdev); 582} 583 584module_init(evtchn_init); 585module_exit(evtchn_cleanup); 586 587MODULE_LICENSE("GPL"); |
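Taken together, the diff replaces one global spinlock (port_user_lock, which every event-channel interrupt for every process had to take) with per-user state. The resulting per-user locking, as read from this version:

/* bind_mutex      - serializes bind/unbind ioctls and, after this change,
 *                   the find_evtchn() walk in evtchn_write()
 * ring_cons_mutex - serializes readers (consumer side of the ring)
 * ring_prod_lock  - producer side; taken plain in evtchn_interrupt() and
 *                   with spin_lock_irq() in IOCTL_EVTCHN_RESET
 */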