/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 *
 * Copyright (c) 2005, Christian Limpach
 * Copyright (c) 2005, Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Changes:
 * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
 *                              and /proc/xen compatibility mount point.
 *                              Turned xenfs into a loadable module.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

#include "xenbus_comms.h"

#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

MODULE_LICENSE("GPL");

/*
 * An element of a list of outstanding transactions, for which we're
 * still awaiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
};

/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;
	unsigned int cons;
	unsigned int len;
	char msg[];
};

struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode.  It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions. */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[XENSTORE_PAYLOAD_MAX];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;
};
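
/*
 * Wire format, as a minimal sketch (illustrative userspace code, not
 * compiled as part of this module): every request written to this
 * device, and every reply read back from it, is a struct xsd_sockmsg
 * header followed immediately by msg.len bytes of payload.  req_id is
 * echoed back in the reply and tx_id is 0 outside a transaction.
 * Reading the key "name" might look like:
 *
 *	struct xsd_sockmsg hdr = {
 *		.type = XS_READ, .req_id = 1, .tx_id = 0,
 *		.len  = sizeof("name"),
 *	};
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "name", sizeof("name"));
 *
 * The header and payload may arrive split across several write()s;
 * xenbus_file_write() below buffers partial messages until a complete
 * header plus payload is available, then acts on the whole message.
 */
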
/* Read out any raw xenbus messages queued up. */
static ssize_t xenbus_file_read(struct file *filp,
				char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	ssize_t i;
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;
		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		/* Copy as much as remains in this buffer. */
		unsigned int sz = min_t(size_t, len - i, rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			/* Report -EFAULT only if nothing was copied. */
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed. */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}

/*
 * Add a buffer to the queue.  Caller must hold the appropriate lock
 * if the queue is not local.  (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then add it
 * to the appropriate list under lock once all the buffers have been
 * successfully allocated.)
 */
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;
	if (len > XENSTORE_PAYLOAD_MAX)
		return -EINVAL;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}
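
/*
 * Typical queue_reply() usage, as a sketch (this mirrors watch_fired()
 * and xenbus_write_transaction() below): build all the fragments of a
 * reply on a local staging list first, so that allocation failures can
 * be unwound without readers ever seeing a half-queued reply, then
 * splice the whole list onto the per-opener queue under reply_mutex:
 *
 *	LIST_HEAD(staging_q);
 *
 *	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
 *	if (!ret)
 *		ret = queue_reply(&staging_q, body, body_len);
 *
 *	mutex_lock(&u->reply_mutex);
 *	if (!ret) {
 *		list_splice_tail(&staging_q, &u->read_buffers);
 *		wake_up(&u->read_waitq);
 *	} else {
 *		queue_cleanup(&staging_q);
 *	}
 *	mutex_unlock(&u->reply_mutex);
 */
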
/*
 * Free all the read_buffers on a list.
 * Caller must have sole reference to list.
 */
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

static void watch_fired(struct xenbus_watch *watch,
			const char **vec,
			unsigned int len)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *path, *token;
	int path_len, tok_len, body_len, data_len = 0;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	path = vec[XS_WATCH_PATH];
	token = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token) + 1;
	if (len > 2)
		data_len = vec[len] - vec[2] + 1;
	body_len = path_len + tok_len + data_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token, tok_len);
	if (!ret && len > 2)
		ret = queue_reply(&staging_q, vec[2], data_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}
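
/*
 * Transaction lifecycle, as a sketch (illustrative userspace code, not
 * compiled here): XS_TRANSACTION_START takes an empty payload ("") and
 * replies with the new transaction id as a NUL-terminated decimal
 * string; XS_TRANSACTION_END takes "T" to commit or "F" to abort:
 *
 *	hdr.type = XS_TRANSACTION_START;
 *	hdr.len  = 1;
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "", 1);
 *	... read the reply; parse its payload as the tx id ...
 *	... issue requests with hdr.tx_id set to that id ...
 *	hdr.type = XS_TRANSACTION_END;
 *	hdr.len  = 2;
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "T", 2);
 *
 * xenbus_write_transaction() below records each started transaction on
 * u->transactions so that anything still open when the file is closed
 * can be aborted in xenbus_file_release().
 */
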
static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	void *reply;
	struct xenbus_transaction_holder *trans = NULL;
	LIST_HEAD(staging_q);

	if (msg_type == XS_TRANSACTION_START) {
		trans = kmalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
	}

	reply = xenbus_dev_request_and_reply(&u->u.msg);
	if (IS_ERR(reply)) {
		kfree(trans);
		rc = PTR_ERR(reply);
		goto out;
	}

	if (msg_type == XS_TRANSACTION_START) {
		if (u->u.msg.type == XS_ERROR)
			kfree(trans);
		else {
			trans->handle.id = simple_strtoul(reply, NULL, 0);
			list_add(&trans->list, &u->transactions);
		}
	} else if (u->u.msg.type == XS_TRANSACTION_END) {
		list_for_each_entry(trans, &u->transactions, list)
			if (trans->handle.id == u->u.msg.tx_id)
				break;
		BUG_ON(&trans->list == &u->transactions);
		list_del(&trans->list);
		kfree(trans);
	}

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
	if (!rc)
		rc = queue_reply(&staging_q, reply, u->u.msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(reply);

out:
	return rc;
}

static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch, *tmp_watch;
	char *path, *token;
	int err, rc;
	LIST_HEAD(staging_q);

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = -EILSEQ;
		goto out;
	}
	token++;
	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
		rc = -EILSEQ;
		goto out;
	}

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success.  Synthesize a reply to say all is OK. */
	{
		struct {
			struct xsd_sockmsg hdr;
			char body[3];
		} __packed reply = {
			{
				.type = msg_type,
				.len = sizeof(reply.body)
			},
			"OK"
		};

		mutex_lock(&u->reply_mutex);
		rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
		wake_up(&u->read_waitq);
		mutex_unlock(&u->reply_mutex);
	}

out:
	return rc;
}
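
/*
 * Watch payload format, for reference: XS_WATCH and XS_UNWATCH carry
 * two NUL-terminated strings back to back, "<path>\0<token>\0", which
 * xenbus_write_watch() above splits with memchr().  A sketch, using a
 * hypothetical token "mytok" (note sizeof() counts both NULs):
 *
 *	hdr.type = XS_WATCH;
 *	hdr.len  = sizeof("device\0mytok");
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "device\0mytok", sizeof("device\0mytok"));
 *
 * Every XS_WATCH_EVENT queued by watch_fired() echoes the registered
 * token back, so a client multiplexing several watches over one file
 * descriptor can tell which watch fired.
 */
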
static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
	LIST_HEAD(staging_q);

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion. */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer. */
	if (len > sizeof(u->u.buffer) - u->len) {
		/* On error, dump existing buffer. */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet. */
	if (u->len < sizeof(u->u.msg))
		goto out; /* not even the header yet */

	/* If we're expecting a message that's larger than we can
	   possibly send, dump what we have and return an error. */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out; /* incomplete data portion */

	/*
	 * OK, now we have a complete message.  Do something with it.
	 */

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* Ask, or stop asking, for a path to be watched for changes. */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction. */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0)
		rc = ret;

	/* Buffered message consumed. */
	u->len = 0;

out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}

static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	nonseekable_open(inode, filp);

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */

	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);

	return 0;
}

static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return POLLIN | POLLRDNORM;
	return 0;
}

const struct file_operations xen_xenbus_fops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);

static struct miscdevice xenbus_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus",
	.fops = &xen_xenbus_fops,
};

static int __init xenbus_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&xenbus_dev);
	if (err)
		pr_err("Could not register xenbus frontend device\n");
	return err;
}

static void __exit xenbus_exit(void)
{
	misc_deregister(&xenbus_dev);
}

module_init(xenbus_init);
module_exit(xenbus_exit);
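
/*
 * End-to-end usage, as a sketch (illustrative userspace code, not part
 * of this module; the device path assumes the usual udev naming for
 * the "xen/xenbus" misc device registered above):
 *
 *	int fd = open("/dev/xen/xenbus", O_RDWR);
 *	struct xsd_sockmsg hdr = {
 *		.type = XS_DIRECTORY, .req_id = 1, .tx_id = 0,
 *		.len  = sizeof("device"),
 *	};
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "device", sizeof("device"));
 *	read(fd, &hdr, sizeof(hdr));
 *	read(fd, buf, hdr.len);
 *
 * The reply payload is a NUL-separated list of directory entries; on
 * failure hdr.type comes back as XS_ERROR with the error name (e.g.
 * "ENOENT") as the payload.  Partial reads are fine, because
 * xenbus_file_read() tracks a consumed-bytes cursor per queued buffer.
 */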