/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>

#include "xenbus_probe.h"

struct xenbus_map_node {
	struct list_head next;
	union {
		struct vm_struct *area; /* PV */
		struct page *page;      /* HVM */
	};
	grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
		[ XenbusStateReconfiguring] = "Reconfiguring",
		[ XenbusStateReconfigured ] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
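
/*
 * Illustrative sketch, not part of this file: a hypothetical frontend usually
 * embeds the xenbus_watch in its private info structure and points it at a
 * node it cares about (xenbus_watch_pathfmt() below builds the path for you).
 * The names example_info and example_otherend_changed are assumptions made up
 * for this example.
 *
 *	static void example_otherend_changed(struct xenbus_watch *watch,
 *					     const char **vec, unsigned int len)
 *	{
 *		struct example_info *info =
 *			container_of(watch, struct example_info, watch);
 *		// react to the peer's change here
 *	}
 *
 *	// info->path must stay allocated for the lifetime of the watch:
 *	err = xenbus_watch_path(dev, info->path, &info->watch,
 *				example_otherend_changed);
 *	if (err)
 *		return err;	// xenbus_watch_path() already reported it
 */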

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and its sprintf-style
 * arguments, using the given xenbus_watch structure for storage, and the
 * given @callback function as the callback.  Return 0 on success, or -errno
 * on error.  On success, the formatted path will be saved as @watch->node,
 * and becomes the caller's to kfree().  On error, watch->node will be NULL,
 * so the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it attempted in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
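
/*
 * Illustrative sketch, assumed rather than taken from this file: a frontend's
 * setup path typically advertises progress with xenbus_switch_state() and
 * reports failures with xenbus_dev_fatal().  example_setup_rings() is a
 * hypothetical helper.
 *
 *	err = example_setup_rings(dev);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "setting up rings");
 *		return err;
 *	}
 *
 *	err = xenbus_switch_state(dev, XenbusStateInitialised);
 *	if (err)
 *		return err;
 *
 *	// ... and once the backend reports XenbusStateConnected:
 *	xenbus_switch_state(dev, XenbusStateConnected);
 */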

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}


static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}


/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */

void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
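
/*
 * Illustrative sketch (an assumption about typical use): xenbus_dev_error()
 * records a problem but leaves the connection alone, while xenbus_dev_fatal()
 * additionally begins an orderly shutdown.  The "feature-foo" key is made up.
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "feature-foo", "%d", &val);
 *	if (err < 0)
 *		xenbus_dev_error(dev, err, "reading feature-foo");  // non-fatal
 *
 *	ring = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 *	if (!ring) {
 *		xenbus_dev_fatal(dev, -ENOMEM, "allocating ring page");
 *		return -ENOMEM;
 *	}
 */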

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return a grant reference on success, or -errno on error.  On error, the
 * device will switch to XenbusStateClosing, and the error will be saved in
 * the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);


/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
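
/*
 * Illustrative sketch of the usual frontend pattern (assumed, not defined in
 * this file): grant the shared ring page, allocate an event channel, and
 * publish both under the frontend's own xenstore directory in a single
 * transaction.  "ring" is a hypothetical page allocated by the caller;
 * "ring-ref" and "event-channel" are the conventional key names.
 *
 *	struct xenbus_transaction xbt;
 *	int gref, evtchn, err;
 *
 *	gref = xenbus_grant_ring(dev, virt_to_mfn(ring));
 *	if (gref < 0)
 *		return gref;
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *
 * again:
 *	err = xenbus_transaction_start(&xbt);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", gref);
 *	if (!err)
 *		err = xenbus_printf(xbt, dev->nodename, "event-channel",
 *				    "%d", evtchn);
 *	if (err) {
 *		xenbus_transaction_end(xbt, 1);
 *		return err;
 *	}
 *	err = xenbus_transaction_end(xbt, 0);
 *	if (err == -EAGAIN)
 *		goto again;
 */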

/**
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);


/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
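
/*
 * Illustrative sketch (assumed backend-side usage): read the frontend's
 * "ring-ref", map the ring into this domain, and release it again with
 * xenbus_unmap_ring_vfree() on disconnect.
 *
 *	int ring_ref;
 *	void *ring;
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%d", &ring_ref);
 *	if (err != 1)
 *		return err < 0 ? err : -EINVAL;
 *
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &ring);
 *	if (err)
 *		return err;
 *
 *	// ... use the shared ring ...
 *
 *	xenbus_unmap_ring_vfree(dev, ring);
 */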

static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *pte;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	op.host_addr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		kfree(node);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	node->handle = op.handle;
	node->area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      int gnt_ref, void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
	if (err)
		goto out_err;

	addr = pfn_to_kaddr(page_to_pfn(node->page));

	err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
	if (err)
		goto out_err_free_ballooned_pages;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_err_free_ballooned_pages:
	free_xenballooned_pages(1, &node->page);
 out_err:
	kfree(node);
	return err;
}


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
			  dev->otherend_id);

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};
	unsigned int level;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = node->handle;
	op.host_addr = arbitrary_virt_to_machine(
		lookup_address((unsigned long)vaddr, &level)).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(node->area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 node->handle, op.status);

	kfree(node);
	return op.status;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = pfn_to_kaddr(page_to_pfn(node->page));
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	rv = xenbus_unmap_ring(dev, node->handle, addr);

	if (!rv)
		free_xenballooned_pages(1, &node->page);
	else
		WARN(1, "Leaking %p\n", vaddr);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
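
/*
 * Illustrative sketch (assumption): a driver's suspend/remove path may check
 * the peer's current state directly; dev->otherend is the peer's xenstore
 * directory.
 *
 *	enum xenbus_state backend_state =
 *		xenbus_read_driver_state(dev->otherend);
 *
 *	if (backend_state != XenbusStateClosed &&
 *	    backend_state != XenbusStateUnknown)
 *		xenbus_switch_state(dev, XenbusStateClosing);
 */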

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
	if (xen_pv_domain())
		ring_ops = &ring_ops_pv;
	else
		ring_ops = &ring_ops_hvm;
}