/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 *
 * Copyright (c) 2005, Christian Limpach
 * Copyright (c) 2005, Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Changes:
 * 2008-10-07	Alex Zeffertt	Replaced /proc/xen/xenbus with xenfs filesystem
 *				and /proc/xen compatibility mount point.
 *				Turned xenfs into a loadable module.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>

#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#include "xenbus.h"

/*
 * An element of a list of outstanding transactions, for which we're
 * still awaiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
};

/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;
	unsigned int cons;
	unsigned int len;
	char msg[];
};

struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode.  It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
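	/*
	 * A single message may arrive split across several write()
	 * calls; it is accumulated in the buffer below, and "len"
	 * tracks how much of the header and payload has arrived so far.
	 */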
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[XENSTORE_PAYLOAD_MAX];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;

	struct kref kref;
};

/* Read out any raw xenbus messages queued up. */
static ssize_t xenbus_file_read(struct file *filp,
				char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	ssize_t i;
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;
		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		size_t sz = min_t(size_t, len - i, rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}

/*
 * Add a buffer to the queue.  Caller must hold the appropriate lock
 * if the queue is not local.  (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then add it
 * to the appropriate list under lock once all the buffers have been
 * successfully allocated.)
 */
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;
	if (len > XENSTORE_PAYLOAD_MAX)
		return -EINVAL;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}

/*
 * Free all the read_buffers on a list.
 * Caller must have sole reference to list.
 */
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

static void watch_fired(struct xenbus_watch *watch,
			const char *path,
			const char *token)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *token_caller;
	int path_len, tok_len, body_len;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	token_caller = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token_caller) + 1;
	body_len = path_len + tok_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token_caller, tok_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}

static void xenbus_file_free(struct kref *kref)
{
	struct xenbus_file_priv *u;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	u = container_of(kref, struct xenbus_file_priv, kref);

	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */
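	/*
	 * Abort any transactions that are still open, unregister and
	 * free all watches, and free any replies still queued for
	 * reading, before freeing the private data itself.
	 */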

	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);
}

static struct xenbus_transaction_holder *xenbus_get_transaction(
	struct xenbus_file_priv *u, uint32_t tx_id)
{
	struct xenbus_transaction_holder *trans;

	list_for_each_entry(trans, &u->transactions, list)
		if (trans->handle.id == tx_id)
			return trans;

	return NULL;
}

void xenbus_dev_queue_reply(struct xb_req_data *req)
{
	struct xenbus_file_priv *u = req->par;
	struct xenbus_transaction_holder *trans = NULL;
	int rc;
	LIST_HEAD(staging_q);

	xs_request_exit(req);

	mutex_lock(&u->msgbuffer_mutex);

	if (req->type == XS_TRANSACTION_START) {
		trans = xenbus_get_transaction(u, 0);
		if (WARN_ON(!trans))
			goto out;
		if (req->msg.type == XS_ERROR) {
			list_del(&trans->list);
			kfree(trans);
		} else {
			rc = kstrtou32(req->body, 10, &trans->handle.id);
			if (WARN_ON(rc))
				goto out;
		}
	} else if (req->type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, req->msg.tx_id);
		if (WARN_ON(!trans))
			goto out;
		list_del(&trans->list);
		kfree(trans);
	}

	mutex_unlock(&u->msgbuffer_mutex);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
	if (!rc)
		rc = queue_reply(&staging_q, req->body, req->msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(req->body);
	kfree(req);

	kref_put(&u->kref, xenbus_file_free);

	return;

out:
	mutex_unlock(&u->msgbuffer_mutex);
}

static int xenbus_command_reply(struct xenbus_file_priv *u,
				unsigned int msg_type, const char *reply)
{
	struct {
		struct xsd_sockmsg hdr;
		char body[16];
	} msg;
	int rc;

	msg.hdr = u->u.msg;
	msg.hdr.type = msg_type;
	msg.hdr.len = strlen(reply) + 1;
	if (msg.hdr.len > sizeof(msg.body))
		return -E2BIG;
	memcpy(&msg.body, reply, msg.hdr.len);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
	wake_up(&u->read_waitq);
	mutex_unlock(&u->reply_mutex);

	if (!rc)
		kref_put(&u->kref, xenbus_file_free);

	return rc;
}

static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	struct xenbus_transaction_holder *trans = NULL;
	struct {
		struct xsd_sockmsg hdr;
		char body[];
	} *msg = (void *)u->u.buffer;

	if (msg_type == XS_TRANSACTION_START) {
		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
		list_add(&trans->list, &u->transactions);
	} else if (msg->hdr.tx_id != 0 &&
		   !xenbus_get_transaction(u, msg->hdr.tx_id))
		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
	else if (msg_type == XS_TRANSACTION_END &&
		 !(msg->hdr.len == 2 &&
		   (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
		return xenbus_command_reply(u, XS_ERROR, "EINVAL");
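
	/*
	 * Hand the request over to xenstore; the reply is delivered
	 * asynchronously via xenbus_dev_queue_reply() above.
	 */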
	rc = xenbus_dev_request_and_reply(&msg->hdr, u);
	if (rc && trans) {
		list_del(&trans->list);
		kfree(trans);
	}

out:
	return rc;
}

static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch;
	char *path, *token;
	int err, rc;
	LIST_HEAD(staging_q);

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}
	token++;
	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry(watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success.  Synthesize a reply to say all is OK. */
	rc = xenbus_command_reply(u, msg_type, "OK");

out:
	return rc;
}

static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
	LIST_HEAD(staging_q);

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer */
	if (len > sizeof(u->u.buffer) - u->len) {
		/* On error, dump existing buffer */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet */
	if (u->len < sizeof(u->u.msg))
		goto out; /* not even the header yet */

	/*
	 * If we're expecting a message that's larger than we can
	 * possibly send, dump what we have and return an error.
	 */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out; /* incomplete data portion */

	/*
	 * OK, now we have a complete message.  Do something with it.
	 */
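
	/*
	 * Take an extra reference on the private data; it is dropped
	 * again by xenbus_dev_queue_reply() or xenbus_command_reply()
	 * once the reply has been queued, or below if dispatch fails.
	 */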
	kref_get(&u->kref);

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0) {
		rc = ret;
		kref_put(&u->kref, xenbus_file_free);
	}

	/* Buffered message consumed */
	u->len = 0;

out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}

static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	nonseekable_open(inode, filp);

	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	kref_init(&u->kref);

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;

	kref_put(&u->kref, xenbus_file_free);

	return 0;
}

static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

const struct file_operations xen_xenbus_fops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);

static struct miscdevice xenbus_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus",
	.fops = &xen_xenbus_fops,
};

static int __init xenbus_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&xenbus_dev);
	if (err)
		pr_err("Could not register xenbus frontend device\n");
	return err;
}
device_initcall(xenbus_init);
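
/*
 * Illustrative sketch only (not part of the driver): a minimal example of
 * how a userspace client is expected to talk to /dev/xen/xenbus.  Each
 * request is a struct xsd_sockmsg header followed by its payload, and a
 * message may be split across several write() calls; replies and watch
 * events are read back in the same framed format.  The path "domid" and
 * the req_id value below are arbitrary examples.
 *
 *	int fd = open("/dev/xen/xenbus", O_RDWR);
 *
 *	struct xsd_sockmsg hdr = {
 *		.type	= XS_READ,
 *		.req_id	= 1,
 *		.tx_id	= 0,
 *		.len	= sizeof("domid"),
 *	};
 *	char buf[XENSTORE_PAYLOAD_MAX];
 *
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "domid", hdr.len);
 *	read(fd, &hdr, sizeof(hdr));
 *	read(fd, buf, hdr.len);
 */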