1 /****************************************************************************** 2 * xenbus_xs.c 3 * 4 * This is the kernel equivalent of the "xs" library. We don't need everything 5 * and we use xenbus_comms for communication. 6 * 7 * Copyright (C) 2005 Rusty Russell, IBM Corporation 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License version 2 11 * as published by the Free Software Foundation; or, when distributed 12 * separately from the Linux kernel or incorporated into other 13 * software packages, subject to the following license: 14 * 15 * Permission is hereby granted, free of charge, to any person obtaining a copy 16 * of this source file (the "Software"), to deal in the Software without 17 * restriction, including without limitation the rights to use, copy, modify, 18 * merge, publish, distribute, sublicense, and/or sell copies of the Software, 19 * and to permit persons to whom the Software is furnished to do so, subject to 20 * the following conditions: 21 * 22 * The above copyright notice and this permission notice shall be included in 23 * all copies or substantial portions of the Software. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 31 * IN THE SOFTWARE. 
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/unistd.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include "xenbus_comms.h"
#include "xenbus_probe.h"

/*
 * One message queued either on xs_state.reply_list (a reply to a request
 * we sent) or on watch_events (an asynchronous watch firing).  The union
 * arm in use is selected by hdr.type (XS_WATCH_EVENT -> u.watch).
 */
struct xs_stored_msg {
	struct list_head list;

	struct xsd_sockmsg hdr;

	union {
		/* Queued replies. */
		struct {
			char *body;
		} reply;

		/* Queued watch events. */
		struct {
			struct xenbus_watch *handle;
			char **vec;
			unsigned int vec_size;
		} watch;
	} u;
};

struct xs_handle {
	/* A list of replies. Currently only one will ever be outstanding. */
	struct list_head reply_list;
	spinlock_t reply_lock;
	wait_queue_head_t reply_waitq;

	/*
	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
	 * response_mutex is never taken simultaneously with the other three.
	 *
	 * transaction_mutex must be held before incrementing
	 * transaction_count. The mutex is held when a suspend is in
	 * progress to prevent new transactions starting.
	 *
	 * When decrementing transaction_count to zero the wait queue
	 * should be woken up, the suspend code waits for count to
	 * reach zero.
	 */

	/* One request at a time. */
	struct mutex request_mutex;

	/* Protect xenbus reader thread against save/restore. */
	struct mutex response_mutex;

	/* Protect transactions against save/restore. */
	struct mutex transaction_mutex;
	atomic_t transaction_count;
	wait_queue_head_t transaction_wq;

	/* Protect watch (de)register against save/restore. */
	struct rw_semaphore watch_mutex;
};

static struct xs_handle xs_state;

/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* List of pending watch callback events, and a lock to protect it. */
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);

/*
 * Details of the xenwatch callback kernel thread. The thread waits on the
 * watch_events_waitq for work to do (queued on watch_events list). When it
 * wakes up it acquires the xenwatch_mutex before reading the list and
 * carrying out work.
 */
static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);

/*
 * Map an error string returned by xenstored (e.g. "ENOENT") to a
 * POSITIVE errno value via the xsd_errors[] table; unknown strings
 * fall back to EINVAL.  Callers negate the result themselves.
 */
static int get_error(const char *errorstring)
{
	unsigned int i;

	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
		if (i == ARRAY_SIZE(xsd_errors) - 1) {
			pr_warn("xen store gave: unknown error %s\n",
				errorstring);
			return EINVAL;
		}
	}
	return xsd_errors[i].errnum;
}

/*
 * Is it still worth talking to xenstore?  Returns false while a local
 * xenstored is being torn down at shutdown/reboot/halt; for PV/HVM the
 * store lives in another domain and is assumed reachable.
 */
static bool xenbus_ok(void)
{
	switch (xen_store_domain_type) {
	case XS_LOCAL:
		switch (system_state) {
		case SYSTEM_POWER_OFF:
		case SYSTEM_RESTART:
		case SYSTEM_HALT:
			return false;
		default:
			break;
		}
		return true;
	case XS_PV:
	case XS_HVM:
		/* FIXME: Could check that the remote domain is alive,
		 * but it is normally initial domain. */
		return true;
	default:
		break;
	}
	return false;
}

/*
 * Block until a reply is queued by the xenbus reader thread, then detach
 * and return its body (kmalloc'ed; ownership passes to the caller).
 * *type, and *len if non-NULL, receive the reply header fields.
 * Returns ERR_PTR(-EIO) if the store has become unreachable (shutdown).
 */
static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
{
	struct xs_stored_msg *msg;
	char *body;

	spin_lock(&xs_state.reply_lock);

	while (list_empty(&xs_state.reply_list)) {
		spin_unlock(&xs_state.reply_lock);
		if (xenbus_ok())
			/* XXX FIXME: Avoid synchronous wait for response here.
			 * Poll with a timeout so a store that dies mid-request
			 * is eventually noticed via xenbus_ok(). */
			wait_event_timeout(xs_state.reply_waitq,
					   !list_empty(&xs_state.reply_list),
					   msecs_to_jiffies(500));
		else {
			/*
			 * If we are in the process of being shut-down there is
			 * no point of trying to contact XenBus - it is either
			 * killed (xenstored application) or the other domain
			 * has been killed or is unreachable.
			 */
			return ERR_PTR(-EIO);
		}
		spin_lock(&xs_state.reply_lock);
	}

	msg = list_entry(xs_state.reply_list.next,
			 struct xs_stored_msg, list);
	list_del(&msg->list);

	spin_unlock(&xs_state.reply_lock);

	*type = msg->hdr.type;
	if (len)
		*len = msg->hdr.len;
	body = msg->u.reply.body;

	/* Caller now owns body; free only the container. */
	kfree(msg);

	return body;
}

/* Account a new transaction; transaction_mutex blocks us out during suspend. */
static void transaction_start(void)
{
	mutex_lock(&xs_state.transaction_mutex);
	atomic_inc(&xs_state.transaction_count);
	mutex_unlock(&xs_state.transaction_mutex);
}

/* Drop a transaction; wake the suspend path when the last one finishes. */
static void transaction_end(void)
{
	if (atomic_dec_and_test(&xs_state.transaction_count))
		wake_up(&xs_state.transaction_wq);
}

/*
 * Stop new transactions and wait for in-flight ones to drain.
 * Leaves transaction_mutex HELD; released by transaction_resume().
 */
static void transaction_suspend(void)
{
	mutex_lock(&xs_state.transaction_mutex);
	wait_event(xs_state.transaction_wq,
		   atomic_read(&xs_state.transaction_count) == 0);
}

static void transaction_resume(void)
{
	mutex_unlock(&xs_state.transaction_mutex);
}

/*
 * Send a pre-built request on behalf of a userspace client (xenbus dev)
 * and return the kmalloc'ed reply body, or ERR_PTR() on failure.
 * Keeps the transaction count balanced: a failed XS_TRANSACTION_START or
 * any XS_TRANSACTION_END reply undoes the accounting done here.
 */
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
	void *ret;
	enum xsd_sockmsg_type type = msg->type;
	int err;

	if (type == XS_TRANSACTION_START)
		transaction_start();

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(msg, sizeof(*msg) + msg->len);
	if (err) {
		msg->type = XS_ERROR;
		ret = ERR_PTR(err);
	} else
		ret = read_reply(&msg->type, &msg->len);

	mutex_unlock(&xs_state.request_mutex);

	if ((msg->type == XS_TRANSACTION_END) ||
	    ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
		transaction_end();

	return ret;
}
EXPORT_SYMBOL(xenbus_dev_request_and_reply);

/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

	msg.tx_id = t.id;
	msg.req_id = 0;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	/* request_mutex serialises the header+payload writes with the reply. */
	mutex_lock(&xs_state.request_mutex);

	err = xb_write(&msg, sizeof(msg));
	if (err) {
		mutex_unlock(&xs_state.request_mutex);
		return ERR_PTR(err);
	}

	for (i = 0; i < num_vecs; i++) {
		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
		if (err) {
			mutex_unlock(&xs_state.request_mutex);
			return ERR_PTR(err);
		}
	}

	ret = read_reply(&msg.type, len);

	mutex_unlock(&xs_state.request_mutex);

	if (IS_ERR(ret))
		return ret;

	if (msg.type == XS_ERROR) {
		/* Body is an error string; convert to -errno. */
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type) {
		pr_warn_ratelimited("unexpected type [%d], expected [%d]\n",
				    msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}
	return ret;
}

/* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xenbus_transaction t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	struct kvec iovec;

	iovec.iov_base = (void *)string;
	iovec.iov_len = strlen(string) + 1;
	return xs_talkv(t, type, &iovec, 1, len);
}

/* Many commands only need an ack, don't care what it says.
*/ 331 static int xs_error(char *reply) 332 { 333 if (IS_ERR(reply)) 334 return PTR_ERR(reply); 335 kfree(reply); 336 return 0; 337 } 338 339 static unsigned int count_strings(const char *strings, unsigned int len) 340 { 341 unsigned int num; 342 const char *p; 343 344 for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) 345 num++; 346 347 return num; 348 } 349 350 /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ 351 static char *join(const char *dir, const char *name) 352 { 353 char *buffer; 354 355 if (strlen(name) == 0) 356 buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); 357 else 358 buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); 359 return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; 360 } 361 362 static char **split(char *strings, unsigned int len, unsigned int *num) 363 { 364 char *p, **ret; 365 366 /* Count the strings. */ 367 *num = count_strings(strings, len); 368 369 /* Transfer to one big alloc for easy freeing. */ 370 ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); 371 if (!ret) { 372 kfree(strings); 373 return ERR_PTR(-ENOMEM); 374 } 375 memcpy(&ret[*num], strings, len); 376 kfree(strings); 377 378 strings = (char *)&ret[*num]; 379 for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) 380 ret[(*num)++] = p; 381 382 return ret; 383 } 384 385 char **xenbus_directory(struct xenbus_transaction t, 386 const char *dir, const char *node, unsigned int *num) 387 { 388 char *strings, *path; 389 unsigned int len; 390 391 path = join(dir, node); 392 if (IS_ERR(path)) 393 return (char **)path; 394 395 strings = xs_single(t, XS_DIRECTORY, path, &len); 396 kfree(path); 397 if (IS_ERR(strings)) 398 return (char **)strings; 399 400 return split(strings, len, num); 401 } 402 EXPORT_SYMBOL_GPL(xenbus_directory); 403 404 /* Check if a path exists. Return 1 if it does. 
*/ 405 int xenbus_exists(struct xenbus_transaction t, 406 const char *dir, const char *node) 407 { 408 char **d; 409 int dir_n; 410 411 d = xenbus_directory(t, dir, node, &dir_n); 412 if (IS_ERR(d)) 413 return 0; 414 kfree(d); 415 return 1; 416 } 417 EXPORT_SYMBOL_GPL(xenbus_exists); 418 419 /* Get the value of a single file. 420 * Returns a kmalloced value: call free() on it after use. 421 * len indicates length in bytes. 422 */ 423 void *xenbus_read(struct xenbus_transaction t, 424 const char *dir, const char *node, unsigned int *len) 425 { 426 char *path; 427 void *ret; 428 429 path = join(dir, node); 430 if (IS_ERR(path)) 431 return (void *)path; 432 433 ret = xs_single(t, XS_READ, path, len); 434 kfree(path); 435 return ret; 436 } 437 EXPORT_SYMBOL_GPL(xenbus_read); 438 439 /* Write the value of a single file. 440 * Returns -err on failure. 441 */ 442 int xenbus_write(struct xenbus_transaction t, 443 const char *dir, const char *node, const char *string) 444 { 445 const char *path; 446 struct kvec iovec[2]; 447 int ret; 448 449 path = join(dir, node); 450 if (IS_ERR(path)) 451 return PTR_ERR(path); 452 453 iovec[0].iov_base = (void *)path; 454 iovec[0].iov_len = strlen(path) + 1; 455 iovec[1].iov_base = (void *)string; 456 iovec[1].iov_len = strlen(string); 457 458 ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); 459 kfree(path); 460 return ret; 461 } 462 EXPORT_SYMBOL_GPL(xenbus_write); 463 464 /* Create a new directory. */ 465 int xenbus_mkdir(struct xenbus_transaction t, 466 const char *dir, const char *node) 467 { 468 char *path; 469 int ret; 470 471 path = join(dir, node); 472 if (IS_ERR(path)) 473 return PTR_ERR(path); 474 475 ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); 476 kfree(path); 477 return ret; 478 } 479 EXPORT_SYMBOL_GPL(xenbus_mkdir); 480 481 /* Destroy a file or directory (directories must be empty). 
*/ 482 int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) 483 { 484 char *path; 485 int ret; 486 487 path = join(dir, node); 488 if (IS_ERR(path)) 489 return PTR_ERR(path); 490 491 ret = xs_error(xs_single(t, XS_RM, path, NULL)); 492 kfree(path); 493 return ret; 494 } 495 EXPORT_SYMBOL_GPL(xenbus_rm); 496 497 /* Start a transaction: changes by others will not be seen during this 498 * transaction, and changes will not be visible to others until end. 499 */ 500 int xenbus_transaction_start(struct xenbus_transaction *t) 501 { 502 char *id_str; 503 504 transaction_start(); 505 506 id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); 507 if (IS_ERR(id_str)) { 508 transaction_end(); 509 return PTR_ERR(id_str); 510 } 511 512 t->id = simple_strtoul(id_str, NULL, 0); 513 kfree(id_str); 514 return 0; 515 } 516 EXPORT_SYMBOL_GPL(xenbus_transaction_start); 517 518 /* End a transaction. 519 * If abandon is true, transaction is discarded instead of committed. 520 */ 521 int xenbus_transaction_end(struct xenbus_transaction t, int abort) 522 { 523 char abortstr[2]; 524 int err; 525 526 if (abort) 527 strcpy(abortstr, "F"); 528 else 529 strcpy(abortstr, "T"); 530 531 err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); 532 533 transaction_end(); 534 535 return err; 536 } 537 EXPORT_SYMBOL_GPL(xenbus_transaction_end); 538 539 /* Single read and scanf: returns -errno or num scanned. */ 540 int xenbus_scanf(struct xenbus_transaction t, 541 const char *dir, const char *node, const char *fmt, ...) 542 { 543 va_list ap; 544 int ret; 545 char *val; 546 547 val = xenbus_read(t, dir, node, NULL); 548 if (IS_ERR(val)) 549 return PTR_ERR(val); 550 551 va_start(ap, fmt); 552 ret = vsscanf(val, fmt, ap); 553 va_end(ap); 554 kfree(val); 555 /* Distinctive errno. */ 556 if (ret == 0) 557 return -ERANGE; 558 return ret; 559 } 560 EXPORT_SYMBOL_GPL(xenbus_scanf); 561 562 /* Read an (optional) unsigned value. 
 */
unsigned int xenbus_read_unsigned(const char *dir, const char *node,
				  unsigned int default_val)
{
	unsigned int val;
	int ret;

	ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
	if (ret <= 0)
		val = default_val;

	return val;
}
EXPORT_SYMBOL_GPL(xenbus_read_unsigned);

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *buf;

	va_start(ap, fmt);
	buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
	va_end(ap);

	if (!buf)
		return -ENOMEM;

	ret = xenbus_write(t, dir, node, buf);

	kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_printf);

/* Takes tuples of names, scanf-style args, and void **, NULL terminated.
 * A NULL fmt means "return the raw kmalloc'ed string in *result"
 * (caller frees); otherwise the value is sscanf'ed into *result.
 */
int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
{
	va_list ap;
	const char *name;
	int ret = 0;

	va_start(ap, dir);
	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
		const char *fmt = va_arg(ap, char *);
		void *result = va_arg(ap, void *);
		char *p;

		p = xenbus_read(t, dir, name, NULL);
		if (IS_ERR(p)) {
			ret = PTR_ERR(p);
			break;
		}
		if (fmt) {
			if (sscanf(p, fmt, result) == 0)
				ret = -EINVAL;
			kfree(p);
		} else
			*(char **)result = p;
	}
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_gather);

/* Ask xenstored to fire events for path, tagged with our token. */
static int xs_watch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (void *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

/* Cancel a watch previously set up with xs_watch(). */
static int xs_unwatch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (char *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (char *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

/*
 * The token is the watch pointer printed in hex (see register_xenbus_watch);
 * decode it and confirm that watch is still on the watches list.
 * Caller must hold watches_lock.
 */
static struct xenbus_watch *find_watch(const char *token)
{
	struct xenbus_watch *i, *cmp;

	cmp = (void *)simple_strtoul(token, NULL, 16);

	list_for_each_entry(i, &watches, list)
		if (i == cmp)
			return i;

	return NULL;
}
/*
 * Certain older XenBus toolstack cannot handle reading values that are
 * not populated. Some Xen 3.4 installation are incapable of doing this
 * so if we are running on anything older than 4 do not attempt to read
 * control/platform-feature-xs_reset_watches.
 */
static bool xen_strict_xenbus_quirk(void)
{
#ifdef CONFIG_X86
	uint32_t eax, ebx, ecx, edx, base;

	/* Xen version is in the high 16 bits of leaf base+1 eax. */
	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	if ((eax >> 16) < 4)
		return true;
#endif
	return false;

}
/*
 * Ask xenstored to drop any watches left over from a previous kernel
 * (kexec).  Only meaningful for HVM non-dom0 guests whose toolstack
 * advertises the feature.
 */
static void xs_reset_watches(void)
{
	int err;

	if (!xen_hvm_domain() || xen_initial_domain())
		return;

	if (xen_strict_xenbus_quirk())
		return;

	if (!xenbus_read_unsigned("control",
				  "platform-feature-xs_reset_watches", 0))
		return;

	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
	if (err && err != -EEXIST)
		pr_warn("xs_reset_watches failed: %d\n", err);
}

/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
	/* Pointer in ascii is the token. */
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	/* Read-side: many (un)registers may run; xs_suspend takes it write. */
	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(find_watch(token));
	list_add(&watch->list, &watches);
	spin_unlock(&watches_lock);

	err = xs_watch(watch->node, token);

	if (err) {
		/* Roll back the list insertion on failure. */
		spin_lock(&watches_lock);
		list_del(&watch->list);
		spin_unlock(&watches_lock);
	}

	up_read(&xs_state.watch_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(register_xenbus_watch);

/*
 * Unregister a watch and flush any of its pending, not-yet-delivered
 * events.  Safe to call from the xenwatch thread itself (a watch
 * callback may unregister its own watch): the xenwatch_mutex is only
 * taken when called from any other context.
 */
void unregister_xenbus_watch(struct xenbus_watch *watch)
{
	struct xs_stored_msg *msg, *tmp;
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(!find_watch(token));
	list_del(&watch->list);
	spin_unlock(&watches_lock);

	err = xs_unwatch(watch->node, token);
	if (err)
		pr_warn("Failed to release watch %s: %i\n", watch->node, err);

	up_read(&xs_state.watch_mutex);

	/* Make sure there are no callbacks running currently (unless
	   its us) */
	if (current->pid != xenwatch_pid)
		mutex_lock(&xenwatch_mutex);

	/* Cancel pending watch events. */
	spin_lock(&watch_events_lock);
	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
		if (msg->u.watch.handle != watch)
			continue;
		list_del(&msg->list);
		kfree(msg->u.watch.vec);
		kfree(msg);
	}
	spin_unlock(&watch_events_lock);

	if (current->pid != xenwatch_pid)
		mutex_unlock(&xenwatch_mutex);
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);

/*
 * Quiesce all xenstore traffic ahead of save/restore.  Lock order here
 * matches the ordering documented in struct xs_handle.  The mutexes
 * stay held until xs_resume() or xs_suspend_cancel().
 */
void xs_suspend(void)
{
	transaction_suspend();
	down_write(&xs_state.watch_mutex);
	mutex_lock(&xs_state.request_mutex);
	mutex_lock(&xs_state.response_mutex);
}

/* Re-establish comms after restore and re-register every watch. */
void xs_resume(void)
{
	struct xenbus_watch *watch;
	char token[sizeof(watch) * 2 + 1];

	xb_init_comms();

	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	transaction_resume();

	/* No need for watches_lock: the watch_mutex is sufficient. */
	list_for_each_entry(watch, &watches, list) {
		sprintf(token, "%lX", (long)watch);
		xs_watch(watch->node, token);
	}

	up_write(&xs_state.watch_mutex);
}

/* Undo xs_suspend() when the suspend was aborted. */
void xs_suspend_cancel(void)
{
	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.watch_mutex);
	mutex_unlock(&xs_state.transaction_mutex);
}

/*
 * Kernel thread draining watch_events one at a time, invoking each
 * watch's callback under xenwatch_mutex (so unregister can synchronise
 * against in-flight callbacks).
 */
static int xenwatch_thread(void *unused)
{
	struct list_head *ent;
	struct xs_stored_msg *msg;

	for (;;) {
		wait_event_interruptible(watch_events_waitq,
					 !list_empty(&watch_events));

		if (kthread_should_stop())
			break;

		mutex_lock(&xenwatch_mutex);

		spin_lock(&watch_events_lock);
		ent = watch_events.next;
		if (ent != &watch_events)
			list_del(ent);
		spin_unlock(&watch_events_lock);

		if (ent != &watch_events) {
			msg = list_entry(ent, struct xs_stored_msg, list);
			msg->u.watch.handle->callback(
				msg->u.watch.handle,
				(const char **)msg->u.watch.vec,
				msg->u.watch.vec_size);
			kfree(msg->u.watch.vec);
			kfree(msg);
		}

		mutex_unlock(&xenwatch_mutex);
	}

	return 0;
}

/*
 * Read one message from the store ring and dispatch it: watch events go
 * to watch_events (waking xenwatch_thread), everything else is queued on
 * reply_list for read_reply().  Returns 0 or -errno.
 */
static int process_msg(void)
{
	struct xs_stored_msg *msg;
	char *body;
	int err;

	/*
	 * We must disallow save/restore while reading a xenstore message.
	 * A partial read across s/r leaves us out of sync with xenstored.
	 */
	for (;;) {
		err = xb_wait_for_data_to_read();
		if (err)
			return err;
		mutex_lock(&xs_state.response_mutex);
		if (xb_data_to_read())
			break;
		/* We raced with save/restore: pending data 'disappeared'. */
		mutex_unlock(&xs_state.response_mutex);
	}


	msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
	if (msg == NULL) {
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(&msg->hdr, sizeof(msg->hdr));
	if (err) {
		kfree(msg);
		goto out;
	}

	/* Sanity-check the length field before trusting it for kmalloc. */
	if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
		kfree(msg);
		err = -EINVAL;
		goto out;
	}

	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
	if (body == NULL) {
		kfree(msg);
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(body, msg->hdr.len);
	if (err) {
		kfree(body);
		kfree(msg);
		goto out;
	}
	body[msg->hdr.len] = '\0';

	if (msg->hdr.type == XS_WATCH_EVENT) {
		/* split() consumes body in all cases. */
		msg->u.watch.vec = split(body, msg->hdr.len,
					 &msg->u.watch.vec_size);
		if (IS_ERR(msg->u.watch.vec)) {
			err = PTR_ERR(msg->u.watch.vec);
			kfree(msg);
			goto out;
		}

		spin_lock(&watches_lock);
		msg->u.watch.handle = find_watch(
			msg->u.watch.vec[XS_WATCH_TOKEN]);
		if (msg->u.watch.handle != NULL) {
			spin_lock(&watch_events_lock);
			list_add_tail(&msg->list, &watch_events);
			wake_up(&watch_events_waitq);
			spin_unlock(&watch_events_lock);
		} else {
			/* Watch was unregistered in the meantime: drop it. */
			kfree(msg->u.watch.vec);
			kfree(msg);
		}
		spin_unlock(&watches_lock);
	} else {
		msg->u.reply.body = body;
		spin_lock(&xs_state.reply_lock);
		list_add_tail(&msg->list,
			      &xs_state.reply_list);
		spin_unlock(&xs_state.reply_lock);
		wake_up(&xs_state.reply_waitq);
	}

 out:
	mutex_unlock(&xs_state.response_mutex);
	return err;
}

/* Kernel thread that pulls messages off the store ring forever. */
static int xenbus_thread(void *unused)
{
	int err;

	for (;;) {
		err = process_msg();
		if (err)
			pr_warn("error %d while reading message\n", err);
		if (kthread_should_stop())
			break;
	}

	return 0;
}

/*
 * One-time initialisation: set up state, bring up the comms rings, and
 * start the xenwatch and xenbus kernel threads.  Returns 0 or -errno.
 */
int xs_init(void)
{
	int err;
	struct task_struct *task;

	INIT_LIST_HEAD(&xs_state.reply_list);
	spin_lock_init(&xs_state.reply_lock);
	init_waitqueue_head(&xs_state.reply_waitq);

	mutex_init(&xs_state.request_mutex);
	mutex_init(&xs_state.response_mutex);
	mutex_init(&xs_state.transaction_mutex);
	init_rwsem(&xs_state.watch_mutex);
	atomic_set(&xs_state.transaction_count, 0);
	init_waitqueue_head(&xs_state.transaction_wq);

	/* Initialize the shared memory rings to talk to xenstored */
	err = xb_init_comms();
	if (err)
		return err;

	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
	if (IS_ERR(task))
		return PTR_ERR(task);
	/* Remembered so unregister_xenbus_watch can detect self-calls. */
	xenwatch_pid = task->pid;

	task = kthread_run(xenbus_thread, NULL, "xenbus");
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* shutdown watches for kexec boot */
	xs_reset_watches();

	return 0;
}