/******************************************************************************
 * Talks to Xen Store to figure out what devices we have.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
 * Copyright (C) 2005, 2006 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#define DPRINTK(fmt, args...)                           \
        pr_debug("xenbus_probe (%s:%d) " fmt ".\n",     \
                 __func__, __LINE__, ##args)

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/xen-ops.h>
#include <xen/page.h>

#include <xen/hvm.h>

#include "xenbus.h"

int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);

struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);

enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);

static unsigned long xen_store_gfn;

static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
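/*
 * Illustrative example (not part of this file): xenbus drivers match on
 * the "devicetype" component of the XenStore path, so a frontend driver
 * for device/vif/<n> nodes might declare:
 *
 *      static const struct xenbus_device_id example_frontend_ids[] = {
 *              { "vif" },
 *              { "" }
 *      };
 */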
/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
{
        for (; *arr->devicetype != '\0'; arr++) {
                if (!strcmp(arr->devicetype, dev->devicetype))
                        return arr;
        }
        return NULL;
}

int xenbus_match(struct device *_dev, struct device_driver *_drv)
{
        struct xenbus_driver *drv = to_xenbus_driver(_drv);

        if (!drv->ids)
                return 0;

        return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}
EXPORT_SYMBOL_GPL(xenbus_match);

static void free_otherend_details(struct xenbus_device *dev)
{
        kfree(dev->otherend);
        dev->otherend = NULL;
}

static void free_otherend_watch(struct xenbus_device *dev)
{
        if (dev->otherend_watch.node) {
                unregister_xenbus_watch(&dev->otherend_watch);
                kfree(dev->otherend_watch.node);
                dev->otherend_watch.node = NULL;
        }
}

static int talk_to_otherend(struct xenbus_device *dev)
{
        struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);

        free_otherend_watch(dev);
        free_otherend_details(dev);

        return drv->read_otherend_details(dev);
}

static int watch_otherend(struct xenbus_device *dev)
{
        struct xen_bus_type *bus =
                container_of(dev->dev.bus, struct xen_bus_type, bus);

        return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
                                    bus->otherend_will_handle,
                                    bus->otherend_changed,
                                    "%s/%s", dev->otherend, "state");
}

int xenbus_read_otherend_details(struct xenbus_device *xendev,
                                 char *id_node, char *path_node)
{
        int err = xenbus_gather(XBT_NIL, xendev->nodename,
                                id_node, "%i", &xendev->otherend_id,
                                path_node, NULL, &xendev->otherend,
                                NULL);
        if (err) {
                xenbus_dev_fatal(xendev, err,
                                 "reading other end details from %s",
                                 xendev->nodename);
                return err;
        }
        if (strlen(xendev->otherend) == 0 ||
            !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
                xenbus_dev_fatal(xendev, -ENOENT,
                                 "unable to read other end from %s. "
                                 "missing or inaccessible.",
                                 xendev->nodename);
                free_otherend_details(xendev);
                return -ENOENT;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);

void xenbus_otherend_changed(struct xenbus_watch *watch,
                             const char *path, const char *token,
                             int ignore_on_shutdown)
{
        struct xenbus_device *dev =
                container_of(watch, struct xenbus_device, otherend_watch);
        struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
        enum xenbus_state state;

        /* Protect us against watches firing on old details when the otherend
           details change, say immediately after a resume. */
        if (!dev->otherend ||
            strncmp(dev->otherend, path, strlen(dev->otherend))) {
                dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
                return;
        }

        state = xenbus_read_driver_state(dev->otherend);

        dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
                state, xenbus_strstate(state), dev->otherend_watch.node, path);
        /*
         * Ignore xenbus transitions during shutdown. This prevents us doing
         * work that can fail e.g., when the rootfs is gone.
         */
        if (system_state > SYSTEM_RUNNING) {
                if (ignore_on_shutdown && (state == XenbusStateClosing))
                        xenbus_frontend_closed(dev);
                return;
        }

        if (drv->otherend_changed)
                drv->otherend_changed(dev, state);
}
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);

#define XENBUS_SHOW_STAT(name)                                          \
static ssize_t name##_show(struct device *_dev,                         \
                           struct device_attribute *attr,               \
                           char *buf)                                   \
{                                                                       \
        struct xenbus_device *dev = to_xenbus_device(_dev);             \
                                                                        \
        return sprintf(buf, "%d\n", atomic_read(&dev->name));           \
}                                                                       \
static DEVICE_ATTR_RO(name)

XENBUS_SHOW_STAT(event_channels);
XENBUS_SHOW_STAT(events);
XENBUS_SHOW_STAT(spurious_events);
XENBUS_SHOW_STAT(jiffies_eoi_delayed);

static ssize_t spurious_threshold_show(struct device *_dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct xenbus_device *dev = to_xenbus_device(_dev);

        return sprintf(buf, "%d\n", dev->spurious_threshold);
}

static ssize_t spurious_threshold_store(struct device *_dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct xenbus_device *dev = to_xenbus_device(_dev);
        unsigned int val;
        ssize_t ret;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;

        dev->spurious_threshold = val;

        return count;
}

static DEVICE_ATTR_RW(spurious_threshold);

static struct attribute *xenbus_attrs[] = {
        &dev_attr_event_channels.attr,
        &dev_attr_events.attr,
        &dev_attr_spurious_events.attr,
        &dev_attr_jiffies_eoi_delayed.attr,
        &dev_attr_spurious_threshold.attr,
        NULL
};

static const struct attribute_group xenbus_group = {
        .name = "xenbus",
        .attrs = xenbus_attrs,
};

int xenbus_dev_probe(struct device *_dev)
{
        struct xenbus_device *dev = to_xenbus_device(_dev);
        struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
        const struct xenbus_device_id *id;
        int err;

        DPRINTK("%s", dev->nodename);

        if (!drv->probe) {
                err = -ENODEV;
                goto fail;
        }

        id = match_device(drv->ids, dev);
        if (!id) {
                err = -ENODEV;
                goto fail;
        }

        err = talk_to_otherend(dev);
        if (err) {
                dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
                         dev->nodename);
                return err;
        }

        if (!try_module_get(drv->driver.owner)) {
                dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
                         drv->driver.name);
                err = -ESRCH;
                goto fail;
        }

        down(&dev->reclaim_sem);
        err = drv->probe(dev, id);
        up(&dev->reclaim_sem);
        if (err)
                goto fail_put;

        err = watch_otherend(dev);
        if (err) {
                dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
                         dev->nodename);
                return err;
        }

        dev->spurious_threshold = 1;
        if (sysfs_create_group(&dev->dev.kobj, &xenbus_group))
                dev_warn(&dev->dev, "sysfs_create_group on %s failed.\n",
                         dev->nodename);

        return 0;
fail_put:
        module_put(drv->driver.owner);
fail:
        xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_dev_probe);

void xenbus_dev_remove(struct device *_dev)
{
        struct xenbus_device *dev = to_xenbus_device(_dev);
        struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);

        DPRINTK("%s", dev->nodename);

        sysfs_remove_group(&dev->dev.kobj, &xenbus_group);

        free_otherend_watch(dev);
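        /*
         * Note (added for clarity): as in xenbus_dev_probe(), reclaim_sem
         * is held around the driver callback so that ->remove() does not
         * run concurrently with a backend driver's ->reclaim_memory() hook.
         */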
        if (drv->remove) {
                down(&dev->reclaim_sem);
                drv->remove(dev);
                up(&dev->reclaim_sem);
        }

        module_put(drv->driver.owner);

        free_otherend_details(dev);

        /*
         * If the toolstack has forced the device state to closing then set
         * the state to closed now to allow it to be cleaned up.
         * Similarly, if the driver does not support re-bind, set the state
         * to closed.
         */
        if (!drv->allow_rebind ||
            xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
                xenbus_switch_state(dev, XenbusStateClosed);
}
EXPORT_SYMBOL_GPL(xenbus_dev_remove);

int xenbus_register_driver_common(struct xenbus_driver *drv,
                                  struct xen_bus_type *bus,
                                  struct module *owner, const char *mod_name)
{
        drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
        drv->driver.bus = &bus->bus;
        drv->driver.owner = owner;
        drv->driver.mod_name = mod_name;

        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_register_driver_common);

void xenbus_unregister_driver(struct xenbus_driver *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);

struct xb_find_info {
        struct xenbus_device *dev;
        const char *nodename;
};

static int cmp_dev(struct device *dev, void *data)
{
        struct xenbus_device *xendev = to_xenbus_device(dev);
        struct xb_find_info *info = data;

        if (!strcmp(xendev->nodename, info->nodename)) {
                info->dev = xendev;
                get_device(dev);
                return 1;
        }
        return 0;
}

static struct xenbus_device *xenbus_device_find(const char *nodename,
                                                struct bus_type *bus)
{
        struct xb_find_info info = { .dev = NULL, .nodename = nodename };

        bus_for_each_dev(bus, NULL, &info, cmp_dev);
        return info.dev;
}

static int cleanup_dev(struct device *dev, void *data)
{
        struct xenbus_device *xendev = to_xenbus_device(dev);
        struct xb_find_info *info = data;
        int len = strlen(info->nodename);

        DPRINTK("%s", info->nodename);

        /* Match the info->nodename path, or any subdirectory of that path. */
        if (strncmp(xendev->nodename, info->nodename, len))
                return 0;
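        /*
         * Illustrative example: cleaning up "device/vif/1" must match the
         * device at "device/vif/1" (and anything below it), but not
         * "device/vif/10", which merely shares the string prefix.
         */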
        /* If the node name is longer, ensure it really is a subdirectory. */
        if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
                return 0;

        info->dev = xendev;
        get_device(dev);
        return 1;
}

static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
{
        struct xb_find_info info = { .nodename = path };

        do {
                info.dev = NULL;
                bus_for_each_dev(bus, NULL, &info, cleanup_dev);
                if (info.dev) {
                        device_unregister(&info.dev->dev);
                        put_device(&info.dev->dev);
                }
        } while (info.dev);
}

static void xenbus_dev_release(struct device *dev)
{
        if (dev)
                kfree(to_xenbus_device(dev));
}

static ssize_t nodename_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
static DEVICE_ATTR_RO(nodename);

static ssize_t devtype_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(devtype);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s:%s\n", dev->bus->name,
                       to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t state_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n",
                       xenbus_strstate(to_xenbus_device(dev)->state));
}
static DEVICE_ATTR_RO(state);

static struct attribute *xenbus_dev_attrs[] = {
        &dev_attr_nodename.attr,
        &dev_attr_devtype.attr,
        &dev_attr_modalias.attr,
        &dev_attr_state.attr,
        NULL,
};

static const struct attribute_group xenbus_dev_group = {
        .attrs = xenbus_dev_attrs,
};

const struct attribute_group *xenbus_dev_groups[] = {
        &xenbus_dev_group,
        NULL,
};
EXPORT_SYMBOL_GPL(xenbus_dev_groups);

int xenbus_probe_node(struct xen_bus_type *bus,
                      const char *type,
                      const char *nodename)
{
        char devname[XEN_BUS_ID_SIZE];
        int err;
        struct xenbus_device *xendev;
        size_t stringlen;
        char *tmpstring;

        enum xenbus_state state = xenbus_read_driver_state(nodename);

        if (state != XenbusStateInitialising) {
                /* Device is not new, so ignore it. This can happen if a
                   device is going away after switching to Closed. */
                return 0;
        }

        stringlen = strlen(nodename) + 1 + strlen(type) + 1;
        xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
        if (!xendev)
                return -ENOMEM;

        xendev->state = XenbusStateInitialising;

        /* Copy the strings into the extra space. */

        tmpstring = (char *)(xendev + 1);
        strcpy(tmpstring, nodename);
        xendev->nodename = tmpstring;

        tmpstring += strlen(tmpstring) + 1;
        strcpy(tmpstring, type);
        xendev->devicetype = tmpstring;
        init_completion(&xendev->down);

        xendev->dev.bus = &bus->bus;
        xendev->dev.release = xenbus_dev_release;

        err = bus->get_bus_id(devname, xendev->nodename);
        if (err)
                goto fail;

        dev_set_name(&xendev->dev, "%s", devname);
        sema_init(&xendev->reclaim_sem, 1);
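        /*
         * Note (added for clarity): once device_register() is called the
         * device core owns xendev and frees it via xenbus_dev_release(),
         * so the failure path below drops the reference with put_device()
         * rather than calling kfree() directly.
         */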
        /* Register with generic device framework. */
        err = device_register(&xendev->dev);
        if (err) {
                put_device(&xendev->dev);
                xendev = NULL;
                goto fail;
        }

        return 0;
fail:
        kfree(xendev);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_node);

static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
{
        int err = 0;
        char **dir;
        unsigned int dir_n = 0;
        int i;

        dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
        if (IS_ERR(dir))
                return PTR_ERR(dir);

        for (i = 0; i < dir_n; i++) {
                err = bus->probe(bus, type, dir[i]);
                if (err)
                        break;
        }

        kfree(dir);
        return err;
}

int xenbus_probe_devices(struct xen_bus_type *bus)
{
        int err = 0;
        char **dir;
        unsigned int i, dir_n;

        dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
        if (IS_ERR(dir))
                return PTR_ERR(dir);

        for (i = 0; i < dir_n; i++) {
                err = xenbus_probe_device_type(bus, dir[i]);
                if (err)
                        break;
        }

        kfree(dir);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_devices);

static unsigned int char_count(const char *str, char c)
{
        unsigned int i, ret = 0;

        for (i = 0; str[i]; i++)
                if (str[i] == c)
                        ret++;
        return ret;
}

static int strsep_len(const char *str, char c, unsigned int len)
{
        unsigned int i;

        for (i = 0; str[i]; i++)
                if (str[i] == c) {
                        if (len == 0)
                                return i;
                        len--;
                }
        return (len == 0) ? i : -ERANGE;
}

void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
{
        int exists, rootlen;
        struct xenbus_device *dev;
        char type[XEN_BUS_ID_SIZE];
        const char *p, *root;

        if (char_count(node, '/') < 2)
                return;

        exists = xenbus_exists(XBT_NIL, node, "");
        if (!exists) {
                xenbus_cleanup_devices(node, &bus->bus);
                return;
        }
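        /*
         * Illustrative example: on the frontend bus (levels == 2), a watch
         * firing for "device/vif/0/state" is parsed below into the type
         * "vif" and the root path "device/vif/0".
         */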
        /* backend/<type>/... or device/<type>/... */
        p = strchr(node, '/') + 1;
        snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
        type[XEN_BUS_ID_SIZE-1] = '\0';

        rootlen = strsep_len(node, '/', bus->levels);
        if (rootlen < 0)
                return;
        root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
        if (!root)
                return;

        dev = xenbus_device_find(root, &bus->bus);
        if (!dev)
                xenbus_probe_node(bus, type, root);
        else
                put_device(&dev->dev);

        kfree(root);
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);

int xenbus_dev_suspend(struct device *dev)
{
        int err = 0;
        struct xenbus_driver *drv;
        struct xenbus_device *xdev
                = container_of(dev, struct xenbus_device, dev);

        DPRINTK("%s", xdev->nodename);

        if (dev->driver == NULL)
                return 0;
        drv = to_xenbus_driver(dev->driver);
        if (drv->suspend)
                err = drv->suspend(xdev);
        if (err)
                dev_warn(dev, "suspend failed: %i\n", err);
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_suspend);

int xenbus_dev_resume(struct device *dev)
{
        int err;
        struct xenbus_driver *drv;
        struct xenbus_device *xdev
                = container_of(dev, struct xenbus_device, dev);

        DPRINTK("%s", xdev->nodename);

        if (dev->driver == NULL)
                return 0;
        drv = to_xenbus_driver(dev->driver);
        err = talk_to_otherend(xdev);
        if (err) {
                dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err);
                return err;
        }

        xdev->state = XenbusStateInitialising;

        if (drv->resume) {
                err = drv->resume(xdev);
                if (err) {
                        dev_warn(dev, "resume failed: %i\n", err);
                        return err;
                }
        }

        err = watch_otherend(xdev);
        if (err) {
                dev_warn(dev, "resume (watch_otherend) failed: %d\n", err);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);

int xenbus_dev_cancel(struct device *dev)
{
        /* Do nothing */
        DPRINTK("cancel");
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_cancel);

/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready;

int register_xenstore_notifier(struct notifier_block *nb)
{
        int ret = 0;

        if (xenstored_ready > 0)
                ret = nb->notifier_call(nb, 0, NULL);
        else
                blocking_notifier_chain_register(&xenstore_chain, nb);

        return ret;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);

void unregister_xenstore_notifier(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&xenstore_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);

static void xenbus_probe(void)
{
        xenstored_ready = 1;

        /*
         * In the HVM case, xenbus_init() deferred its call to
         * xs_init() in case callbacks were not operational yet.
         * So do it now.
         */
        if (xen_store_domain_type == XS_HVM)
                xs_init();

        /* Notify others that xenstore is up */
        blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
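/*
 * Summary (added for clarity): xenbus_probe() is reached in one of three
 * ways, depending on xen_store_domain_type: from xenbus_probe_initcall()
 * for XS_PV (and XS_HVM with a working event channel callback), from
 * xen_set_callback_via() when the HVM callback only becomes usable later,
 * or from xenbus_probe_thread() for XS_LOCAL once the first activity on
 * xb_waitq shows that xenstored is talking to us.
 */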
/*
 * Returns true when XenStore init must be deferred in order to
 * allow the PCI platform device to be initialised, before we
 * can actually have event channel interrupts working.
 */
static bool xs_hvm_defer_init_for_callback(void)
{
#ifdef CONFIG_XEN_PVHVM
        return xen_store_domain_type == XS_HVM &&
                !xen_have_vector_callback;
#else
        return false;
#endif
}

static int xenbus_probe_thread(void *unused)
{
        DEFINE_WAIT(w);

        /*
         * We actually just want to wait for *any* trigger of xb_waitq,
         * and run xenbus_probe() the moment it occurs.
         */
        prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
        schedule();
        finish_wait(&xb_waitq, &w);

        DPRINTK("probing");
        xenbus_probe();
        return 0;
}

static int __init xenbus_probe_initcall(void)
{
        /*
         * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
         * need to wait for the platform PCI device to come up.
         */
        if (xen_store_domain_type == XS_PV ||
            (xen_store_domain_type == XS_HVM &&
             !xs_hvm_defer_init_for_callback()))
                xenbus_probe();

        /*
         * For XS_LOCAL, spawn a thread which will wait for xenstored
         * or a xenstore-stubdom to be started, then probe. It will be
         * triggered when communication starts happening, by waiting
         * on xb_waitq.
         */
        if (xen_store_domain_type == XS_LOCAL) {
                struct task_struct *probe_task;

                probe_task = kthread_run(xenbus_probe_thread, NULL,
                                         "xenbus_probe");
                if (IS_ERR(probe_task))
                        return PTR_ERR(probe_task);
        }
        return 0;
}
device_initcall(xenbus_probe_initcall);

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        int ret;

        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;

        ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
        if (ret)
                return ret;

        /*
         * If xenbus_probe_initcall() deferred the xenbus_probe()
         * due to the callback not functioning yet, we can do it now.
         */
842 */ 843 if (!xenstored_ready && xs_hvm_defer_init_for_callback()) 844 xenbus_probe(); 845 846 return ret; 847 } 848 EXPORT_SYMBOL_GPL(xen_set_callback_via); 849 850 /* Set up event channel for xenstored which is run as a local process 851 * (this is normally used only in dom0) 852 */ 853 static int __init xenstored_local_init(void) 854 { 855 int err = -ENOMEM; 856 unsigned long page = 0; 857 struct evtchn_alloc_unbound alloc_unbound; 858 859 /* Allocate Xenstore page */ 860 page = get_zeroed_page(GFP_KERNEL); 861 if (!page) 862 goto out_err; 863 864 xen_store_gfn = virt_to_gfn((void *)page); 865 866 /* Next allocate a local port which xenstored can bind to */ 867 alloc_unbound.dom = DOMID_SELF; 868 alloc_unbound.remote_dom = DOMID_SELF; 869 870 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, 871 &alloc_unbound); 872 if (err == -ENOSYS) 873 goto out_err; 874 875 BUG_ON(err); 876 xen_store_evtchn = alloc_unbound.port; 877 878 return 0; 879 880 out_err: 881 if (page != 0) 882 free_page(page); 883 return err; 884 } 885 886 static int xenbus_resume_cb(struct notifier_block *nb, 887 unsigned long action, void *data) 888 { 889 int err = 0; 890 891 if (xen_hvm_domain()) { 892 uint64_t v = 0; 893 894 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); 895 if (!err && v) 896 xen_store_evtchn = v; 897 else 898 pr_warn("Cannot update xenstore event channel: %d\n", 899 err); 900 } else 901 xen_store_evtchn = xen_start_info->store_evtchn; 902 903 return err; 904 } 905 906 static struct notifier_block xenbus_resume_nb = { 907 .notifier_call = xenbus_resume_cb, 908 }; 909 910 static int __init xenbus_init(void) 911 { 912 int err = 0; 913 uint64_t v = 0; 914 xen_store_domain_type = XS_UNKNOWN; 915 916 if (!xen_domain()) 917 return -ENODEV; 918 919 xenbus_ring_ops_init(); 920 921 if (xen_pv_domain()) 922 xen_store_domain_type = XS_PV; 923 if (xen_hvm_domain()) 924 xen_store_domain_type = XS_HVM; 925 if (xen_hvm_domain() && xen_initial_domain()) 926 xen_store_domain_type = XS_LOCAL; 927 if (xen_pv_domain() && !xen_start_info->store_evtchn) 928 xen_store_domain_type = XS_LOCAL; 929 if (xen_pv_domain() && xen_start_info->store_evtchn) 930 xenstored_ready = 1; 931 932 switch (xen_store_domain_type) { 933 case XS_LOCAL: 934 err = xenstored_local_init(); 935 if (err) 936 goto out_error; 937 xen_store_interface = gfn_to_virt(xen_store_gfn); 938 break; 939 case XS_PV: 940 xen_store_evtchn = xen_start_info->store_evtchn; 941 xen_store_gfn = xen_start_info->store_mfn; 942 xen_store_interface = gfn_to_virt(xen_store_gfn); 943 break; 944 case XS_HVM: 945 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); 946 if (err) 947 goto out_error; 948 xen_store_evtchn = (int)v; 949 err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); 950 if (err) 951 goto out_error; 952 xen_store_gfn = (unsigned long)v; 953 xen_store_interface = 954 xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, 955 XEN_PAGE_SIZE); 956 break; 957 default: 958 pr_warn("Xenstore state unknown\n"); 959 break; 960 } 961 962 /* 963 * HVM domains may not have a functional callback yet. In that 964 * case let xs_init() be called from xenbus_probe(), which will 965 * get invoked at an appropriate time. 
966 */ 967 if (xen_store_domain_type != XS_HVM) { 968 err = xs_init(); 969 if (err) { 970 pr_warn("Error initializing xenstore comms: %i\n", err); 971 goto out_error; 972 } 973 } 974 975 if ((xen_store_domain_type != XS_LOCAL) && 976 (xen_store_domain_type != XS_UNKNOWN)) 977 xen_resume_notifier_register(&xenbus_resume_nb); 978 979 #ifdef CONFIG_XEN_COMPAT_XENFS 980 /* 981 * Create xenfs mountpoint in /proc for compatibility with 982 * utilities that expect to find "xenbus" under "/proc/xen". 983 */ 984 proc_create_mount_point("xen"); 985 #endif 986 987 out_error: 988 return err; 989 } 990 991 postcore_initcall(xenbus_init); 992 993 MODULE_LICENSE("GPL"); 994