/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 *
 * Copyright (c) 2005, Christian Limpach
 * Copyright (c) 2005, Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Changes:
 * 2008-10-07	Alex Zeffertt	Replaced /proc/xen/xenbus with xenfs filesystem
 *				and /proc/xen compatibility mount point.
 *				Turned xenfs into a loadable module.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>

#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#include "xenbus.h"

/*
 * An element of a list of outstanding transactions, for which we're
 * still awaiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
};

/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;
	unsigned int cons;
	unsigned int len;
	char msg[];
};

struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode.  It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[XENSTORE_PAYLOAD_MAX];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;

	struct kref kref;
};
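
/*
 * Illustrative user-space view of this device (not part of the driver
 * itself): a client opens /dev/xen/xenbus and exchanges raw xenstore
 * messages, each an xsd_sockmsg header followed by hdr.len bytes of
 * payload.  A minimal sketch, assuming the xsd_sockmsg layout from
 * xen/interface/io/xs_wire.h and a hypothetical descriptor "fd":
 *
 *	struct xsd_sockmsg hdr = {
 *		.type   = XS_READ,
 *		.req_id = 1,
 *		.tx_id  = 0,
 *		.len    = sizeof("domid"),
 *	};
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "domid", sizeof("domid"));	writes may be split up
 *	read(fd, &hdr, sizeof(hdr));		reply header...
 *	read(fd, reply, hdr.len);		...then the reply body
 */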
/* Read out any raw xenbus messages queued up. */
static ssize_t xenbus_file_read(struct file *filp,
				char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	unsigned i;
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;
		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		unsigned sz = min((unsigned)len - i, rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}

/*
 * Add a buffer to the queue.  Caller must hold the appropriate lock
 * if the queue is not local.  (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then add it
 * to the appropriate list under lock once all the buffers have been
 * successfully allocated.)
 */
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;
	if (len > XENSTORE_PAYLOAD_MAX)
		return -EINVAL;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}
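
/*
 * The pattern the comment above describes, in sketch form ("hdr",
 * "body" and "body_len" stand in for a caller's data; the real
 * callers, watch_fired() and xenbus_dev_queue_reply() below, differ
 * in where exactly they take the lock):
 *
 *	LIST_HEAD(staging_q);
 *
 *	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
 *	if (!ret)
 *		ret = queue_reply(&staging_q, body, body_len);
 *
 *	mutex_lock(&u->reply_mutex);
 *	if (!ret) {
 *		list_splice_tail(&staging_q, &u->read_buffers);
 *		wake_up(&u->read_waitq);
 *	} else {
 *		queue_cleanup(&staging_q);
 *	}
 *	mutex_unlock(&u->reply_mutex);
 */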
/*
 * Free all the read_buffer structures on a list.
 * Caller must have sole reference to list.
 */
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

static void watch_fired(struct xenbus_watch *watch,
			const char *path,
			const char *token)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *token_caller;
	int path_len, tok_len, body_len;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	token_caller = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token_caller) + 1;
	body_len = path_len + tok_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token_caller, tok_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}
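
/*
 * A fired watch reaches the reader as a single XS_WATCH_EVENT message
 * whose body is the path and the caller's token, each NUL-terminated.
 * For a watch on "device/vbd" registered with token "tok" (hypothetical
 * values), the reader would see:
 *
 *	hdr.type = XS_WATCH_EVENT
 *	hdr.len  = 15
 *	body     = "device/vbd\0tok\0"
 */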
static void xenbus_file_free(struct kref *kref)
{
	struct xenbus_file_priv *u;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	u = container_of(kref, struct xenbus_file_priv, kref);

	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */

	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);
}

static struct xenbus_transaction_holder *xenbus_get_transaction(
	struct xenbus_file_priv *u, uint32_t tx_id)
{
	struct xenbus_transaction_holder *trans;

	list_for_each_entry(trans, &u->transactions, list)
		if (trans->handle.id == tx_id)
			return trans;

	return NULL;
}

void xenbus_dev_queue_reply(struct xb_req_data *req)
{
	struct xenbus_file_priv *u = req->par;
	struct xenbus_transaction_holder *trans = NULL;
	int rc;
	LIST_HEAD(staging_q);

	xs_request_exit(req);

	mutex_lock(&u->msgbuffer_mutex);

	if (req->type == XS_TRANSACTION_START) {
		trans = xenbus_get_transaction(u, 0);
		if (WARN_ON(!trans))
			goto out;
		if (req->msg.type == XS_ERROR) {
			list_del(&trans->list);
			kfree(trans);
		} else {
			rc = kstrtou32(req->body, 10, &trans->handle.id);
			if (WARN_ON(rc))
				goto out;
		}
	} else if (req->msg.type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, req->msg.tx_id);
		if (WARN_ON(!trans))
			goto out;
		list_del(&trans->list);
		kfree(trans);
	}

	mutex_unlock(&u->msgbuffer_mutex);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
	if (!rc)
		rc = queue_reply(&staging_q, req->body, req->msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(req->body);
	kfree(req);

	kref_put(&u->kref, xenbus_file_free);

	return;

out:
	mutex_unlock(&u->msgbuffer_mutex);
}

static int xenbus_command_reply(struct xenbus_file_priv *u,
				unsigned int msg_type, const char *reply)
{
	struct {
		struct xsd_sockmsg hdr;
		char body[16];
	} msg;
	int rc;

	msg.hdr = u->u.msg;
	msg.hdr.type = msg_type;
	msg.hdr.len = strlen(reply) + 1;
	if (msg.hdr.len > sizeof(msg.body))
		return -E2BIG;
	memcpy(&msg.body, reply, msg.hdr.len);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
	wake_up(&u->read_waitq);
	mutex_unlock(&u->reply_mutex);

	if (!rc)
		kref_put(&u->kref, xenbus_file_free);

	return rc;
}

static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	struct xenbus_transaction_holder *trans = NULL;

	if (msg_type == XS_TRANSACTION_START) {
		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
		list_add(&trans->list, &u->transactions);
	} else if (u->u.msg.tx_id != 0 &&
		   !xenbus_get_transaction(u, u->u.msg.tx_id))
		return xenbus_command_reply(u, XS_ERROR, "ENOENT");

	rc = xenbus_dev_request_and_reply(&u->u.msg, u);
	if (rc && trans) {
		list_del(&trans->list);
		kfree(trans);
	}

out:
	return rc;
}
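
/*
 * Illustrative transaction flow as seen by a client of this device
 * (a sketch of the usual xenstore wire protocol; ids and payloads
 * are hypothetical):
 *
 *	XS_TRANSACTION_START, tx_id 0, body ""     ->  reply body "7\0"
 *	XS_WRITE,             tx_id 7, "foo\0bar"  ->  "OK\0"
 *	XS_TRANSACTION_END,   tx_id 7, "T\0"       ->  "OK\0"
 *
 * xenbus_write_transaction() above allocates the holder before the
 * start request goes out; xenbus_dev_queue_reply() parses the new id
 * out of the start reply, so a later XS_TRANSACTION_END carrying an
 * unknown tx_id is rejected with XS_ERROR/ENOENT.
 */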
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch;
	char *path, *token;
	int err, rc;

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}
	token++;
	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry(watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success.  Synthesize a reply to say all is OK. */
	rc = xenbus_command_reply(u, msg_type, "OK");

out:
	return rc;
}
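
/*
 * Because xenbus_file_write() below buffers partial requests, a
 * client may split one message over several write() calls; the
 * request is only acted on once header plus payload are complete.
 * A minimal user-space sketch (hypothetical descriptor "fd"):
 *
 *	write(fd, &hdr, sizeof(hdr));	header first...
 *	write(fd, body, hdr.len);	...payload in a later write
 *
 * Interleaved partial writes from two threads on the same open file
 * would corrupt each other's messages, though; the driver only
 * guarantees its own internal consistency, as the comment in
 * xenbus_file_write() notes.
 */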
static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
	LIST_HEAD(staging_q);

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer */
	if (len > sizeof(u->u.buffer) - u->len) {
		/* On error, dump existing buffer */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet */
	if (u->len < sizeof(u->u.msg))
		goto out; /* not even the header yet */

	/*
	 * If we're expecting a message that's larger than we can
	 * possibly send, dump what we have and return an error.
	 */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out; /* incomplete data portion */

	/*
	 * OK, now we have a complete message.  Do something with it.
	 */

	kref_get(&u->kref);

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0) {
		rc = ret;
		kref_put(&u->kref, xenbus_file_free);
	}

	/* Buffered message consumed */
	u->len = 0;

out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}

static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	nonseekable_open(inode, filp);

	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	kref_init(&u->kref);

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;

	kref_put(&u->kref, xenbus_file_free);

	return 0;
}

static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return POLLIN | POLLRDNORM;
	return 0;
}

const struct file_operations xen_xenbus_fops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);

static struct miscdevice xenbus_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus",
	.fops = &xen_xenbus_fops,
};

static int __init xenbus_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&xenbus_dev);
	if (err)
		pr_err("Could not register xenbus frontend device\n");
	return err;
}
device_initcall(xenbus_init);