14bac07c9SJeremy Fitzhardinge /******************************************************************************
24bac07c9SJeremy Fitzhardinge * Client-facing interface for the Xenbus driver. In other words, the
34bac07c9SJeremy Fitzhardinge * interface between the Xenbus and the device-specific code, be it the
44bac07c9SJeremy Fitzhardinge * frontend or the backend of that driver.
54bac07c9SJeremy Fitzhardinge *
64bac07c9SJeremy Fitzhardinge * Copyright (C) 2005 XenSource Ltd
74bac07c9SJeremy Fitzhardinge *
84bac07c9SJeremy Fitzhardinge * This program is free software; you can redistribute it and/or
94bac07c9SJeremy Fitzhardinge * modify it under the terms of the GNU General Public License version 2
104bac07c9SJeremy Fitzhardinge * as published by the Free Software Foundation; or, when distributed
114bac07c9SJeremy Fitzhardinge * separately from the Linux kernel or incorporated into other
124bac07c9SJeremy Fitzhardinge * software packages, subject to the following license:
134bac07c9SJeremy Fitzhardinge *
144bac07c9SJeremy Fitzhardinge * Permission is hereby granted, free of charge, to any person obtaining a copy
154bac07c9SJeremy Fitzhardinge * of this source file (the "Software"), to deal in the Software without
164bac07c9SJeremy Fitzhardinge * restriction, including without limitation the rights to use, copy, modify,
174bac07c9SJeremy Fitzhardinge * merge, publish, distribute, sublicense, and/or sell copies of the Software,
184bac07c9SJeremy Fitzhardinge * and to permit persons to whom the Software is furnished to do so, subject to
194bac07c9SJeremy Fitzhardinge * the following conditions:
204bac07c9SJeremy Fitzhardinge *
214bac07c9SJeremy Fitzhardinge * The above copyright notice and this permission notice shall be included in
224bac07c9SJeremy Fitzhardinge * all copies or substantial portions of the Software.
234bac07c9SJeremy Fitzhardinge *
244bac07c9SJeremy Fitzhardinge * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
254bac07c9SJeremy Fitzhardinge * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
264bac07c9SJeremy Fitzhardinge * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
274bac07c9SJeremy Fitzhardinge * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
284bac07c9SJeremy Fitzhardinge * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
294bac07c9SJeremy Fitzhardinge * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
304bac07c9SJeremy Fitzhardinge * IN THE SOFTWARE.
314bac07c9SJeremy Fitzhardinge */
324bac07c9SJeremy Fitzhardinge
3345e27161SSteven Noonan #include <linux/mm.h>
345a0e3ad6STejun Heo #include <linux/slab.h>
354bac07c9SJeremy Fitzhardinge #include <linux/types.h>
362c5d37d3SDaniel De Graaf #include <linux/spinlock.h>
374bac07c9SJeremy Fitzhardinge #include <linux/vmalloc.h>
3863c9744bSPaul Gortmaker #include <linux/export.h>
394bac07c9SJeremy Fitzhardinge #include <asm/xen/hypervisor.h>
40a9fd60e2SJulien Grall #include <xen/page.h>
414bac07c9SJeremy Fitzhardinge #include <xen/interface/xen.h>
424bac07c9SJeremy Fitzhardinge #include <xen/interface/event_channel.h>
432c5d37d3SDaniel De Graaf #include <xen/balloon.h>
444bac07c9SJeremy Fitzhardinge #include <xen/events.h>
454bac07c9SJeremy Fitzhardinge #include <xen/grant_table.h>
464bac07c9SJeremy Fitzhardinge #include <xen/xenbus.h>
472c5d37d3SDaniel De Graaf #include <xen/xen.h>
48be3e9cf3SMukesh Rathor #include <xen/features.h>
492c5d37d3SDaniel De Graaf
50332f791dSJuergen Gross #include "xenbus.h"
512c5d37d3SDaniel De Graaf
5289bf4b4eSJulien Grall #define XENBUS_PAGES(_grants) (DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))
5389bf4b4eSJulien Grall
5489bf4b4eSJulien Grall #define XENBUS_MAX_RING_PAGES (XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))
5589bf4b4eSJulien Grall
/*
 * Book-keeping for one ring mapped from a peer domain: records enough
 * about how the grants were mapped (PV vmalloc area or HVM ballooned
 * pages) that the mapping can be torn down later.
 */
struct xenbus_map_node {
	struct list_head next;		/* entry in xenbus_valloc_pages */
	union {
		struct {
			struct vm_struct *area;	/* PV: vmalloc area of the mapping */
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;		/* HVM: virtual address of the mapping */
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];	/* for later unmap */
	unsigned int nr_handles;	/* number of valid entries in handles[] */
};
712c5d37d3SDaniel De Graaf
/*
 * Scratch state for a single map/unmap operation; allocated per call in
 * xenbus_map_ring_valloc() so the large arrays do not live on the stack.
 */
struct map_ring_valloc {
	struct xenbus_map_node *node;	/* bookkeeping node for this mapping */

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;
};
843848e4e0SJuergen Gross
/* Protects xenbus_valloc_pages, the list of currently mapped rings. */
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

/*
 * Backend-specific map/unmap operations; the concrete implementation
 * (presumably PV vs. HVM) is installed elsewhere — not visible in this
 * part of the file.
 */
struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;
964bac07c9SJeremy Fitzhardinge
xenbus_strstate(enum xenbus_state state)974bac07c9SJeremy Fitzhardinge const char *xenbus_strstate(enum xenbus_state state)
984bac07c9SJeremy Fitzhardinge {
994bac07c9SJeremy Fitzhardinge static const char *const name[] = {
1004bac07c9SJeremy Fitzhardinge [ XenbusStateUnknown ] = "Unknown",
1014bac07c9SJeremy Fitzhardinge [ XenbusStateInitialising ] = "Initialising",
1024bac07c9SJeremy Fitzhardinge [ XenbusStateInitWait ] = "InitWait",
1034bac07c9SJeremy Fitzhardinge [ XenbusStateInitialised ] = "Initialised",
1044bac07c9SJeremy Fitzhardinge [ XenbusStateConnected ] = "Connected",
1054bac07c9SJeremy Fitzhardinge [ XenbusStateClosing ] = "Closing",
1064bac07c9SJeremy Fitzhardinge [ XenbusStateClosed ] = "Closed",
10789afb6e4SYosuke Iwamatsu [XenbusStateReconfiguring] = "Reconfiguring",
10889afb6e4SYosuke Iwamatsu [XenbusStateReconfigured] = "Reconfigured",
1094bac07c9SJeremy Fitzhardinge };
1104bac07c9SJeremy Fitzhardinge return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
1114bac07c9SJeremy Fitzhardinge }
1124bac07c9SJeremy Fitzhardinge EXPORT_SYMBOL_GPL(xenbus_strstate);
1134bac07c9SJeremy Fitzhardinge
1144bac07c9SJeremy Fitzhardinge /**
1154bac07c9SJeremy Fitzhardinge * xenbus_watch_path - register a watch
1164bac07c9SJeremy Fitzhardinge * @dev: xenbus device
1174bac07c9SJeremy Fitzhardinge * @path: path to watch
1184bac07c9SJeremy Fitzhardinge * @watch: watch to register
1194bac07c9SJeremy Fitzhardinge * @callback: callback to register
1204bac07c9SJeremy Fitzhardinge *
1214bac07c9SJeremy Fitzhardinge * Register a @watch on the given path, using the given xenbus_watch structure
1224bac07c9SJeremy Fitzhardinge * for storage, and the given @callback function as the callback. Return 0 on
1234bac07c9SJeremy Fitzhardinge * success, or -errno on error. On success, the given @path will be saved as
1244bac07c9SJeremy Fitzhardinge * @watch->node, and remains the caller's to free. On error, @watch->node will
1254bac07c9SJeremy Fitzhardinge * be NULL, the device will switch to %XenbusStateClosing, and the error will
1264bac07c9SJeremy Fitzhardinge * be saved in the store.
1274bac07c9SJeremy Fitzhardinge */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      bool (*will_handle)(struct xenbus_watch *,
					  const char *, const char *),
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int ret;

	/* Fill in the watch before handing it to the xenbus core. */
	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	ret = register_xenbus_watch(watch);
	if (!ret)
		return 0;

	/*
	 * Registration failed: clear the watch again so the caller sees
	 * watch->node == NULL, and report the error into the store.
	 */
	watch->node = NULL;
	watch->will_handle = NULL;
	watch->callback = NULL;
	xenbus_dev_fatal(dev, ret, "adding watch on %s", path);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
1534bac07c9SJeremy Fitzhardinge
1544bac07c9SJeremy Fitzhardinge
1554bac07c9SJeremy Fitzhardinge /**
1564bac07c9SJeremy Fitzhardinge * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
1574bac07c9SJeremy Fitzhardinge * @dev: xenbus device
1584bac07c9SJeremy Fitzhardinge * @watch: watch to register
1594bac07c9SJeremy Fitzhardinge * @callback: callback to register
1604bac07c9SJeremy Fitzhardinge * @pathfmt: format of path to watch
1614bac07c9SJeremy Fitzhardinge *
1624bac07c9SJeremy Fitzhardinge * Register a watch on the given @path, using the given xenbus_watch
1634bac07c9SJeremy Fitzhardinge * structure for storage, and the given @callback function as the callback.
1644bac07c9SJeremy Fitzhardinge * Return 0 on success, or -errno on error. On success, the watched path
1654bac07c9SJeremy Fitzhardinge * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
1664bac07c9SJeremy Fitzhardinge * kfree(). On error, watch->node will be NULL, so the caller has nothing to
1674bac07c9SJeremy Fitzhardinge * free, the device will switch to %XenbusStateClosing, and the error will be
1684bac07c9SJeremy Fitzhardinge * saved in the store.
1694bac07c9SJeremy Fitzhardinge */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 bool (*will_handle)(struct xenbus_watch *,
					     const char *, const char *),
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	char *path;
	va_list args;
	int ret;

	/* Build the watched path from the format string. */
	va_start(args, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, args);
	va_end(args);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}

	ret = xenbus_watch_path(dev, path, watch, will_handle, callback);
	if (ret)
		kfree(path);	/* on success the path stays owned by the watch */

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
1974bac07c9SJeremy Fitzhardinge
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

/*
 * __xenbus_switch_state - advertise a new device state in Xenstore.
 * @dev: xenbus device
 * @state: new state to write
 * @depth: recursion depth; non-zero when called from error recovery so
 *	   that xenbus_switch_fatal() does not recurse indefinitely.
 *
 * Always returns 0; failures are reported via xenbus_switch_fatal().
 */
static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	/* Fast path: cached state already matches, nothing to write. */
	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	/* If the "state" node has gone, don't resurrect it (see above). */
	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		/* -EAGAIN means the transaction raced another writer: retry. */
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;	/* only cache on successful commit */

	return 0;
}
2564bac07c9SJeremy Fitzhardinge
2574bac07c9SJeremy Fitzhardinge /**
2584bac07c9SJeremy Fitzhardinge * xenbus_switch_state
2594bac07c9SJeremy Fitzhardinge * @dev: xenbus device
2604bac07c9SJeremy Fitzhardinge * @state: new state
2614bac07c9SJeremy Fitzhardinge *
2624bac07c9SJeremy Fitzhardinge * Advertise in the store a change of the given driver to the given new_state.
2634bac07c9SJeremy Fitzhardinge * Return 0 on success, or -errno on error. On error, the device will switch
2644bac07c9SJeremy Fitzhardinge * to XenbusStateClosing, and the error will be saved in the store.
2654bac07c9SJeremy Fitzhardinge */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	/* Depth 0: error recovery may still switch us to Closing once. */
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
2724bac07c9SJeremy Fitzhardinge
xenbus_frontend_closed(struct xenbus_device * dev)2734bac07c9SJeremy Fitzhardinge int xenbus_frontend_closed(struct xenbus_device *dev)
2744bac07c9SJeremy Fitzhardinge {
2754bac07c9SJeremy Fitzhardinge xenbus_switch_state(dev, XenbusStateClosed);
2764bac07c9SJeremy Fitzhardinge complete(&dev->down);
2774bac07c9SJeremy Fitzhardinge return 0;
2784bac07c9SJeremy Fitzhardinge }
2794bac07c9SJeremy Fitzhardinge EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
2804bac07c9SJeremy Fitzhardinge
/*
 * Common helper for xenbus_dev_error()/xenbus_dev_fatal(): log the error
 * via dev_err() and, best effort, record it under error/<nodename> in
 * Xenstore.
 */
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	char *msg;
	char *path;
	unsigned int prefix_len;

#define PRINTF_BUFFER_SIZE 4096

	msg = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!msg)
		return;

	/* Message format: "<positive errno> <formatted text>". */
	prefix_len = sprintf(msg, "%i ", -err);
	vsnprintf(msg + prefix_len, PRINTF_BUFFER_SIZE - prefix_len, fmt, ap);

	dev_err(&dev->dev, "%s\n", msg);

	/* Failure to build the path just skips the Xenstore record. */
	path = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path)
		xenbus_write(XBT_NIL, path, "error", msg);

	kfree(msg);
	kfree(path);
}
3064bac07c9SJeremy Fitzhardinge
3074bac07c9SJeremy Fitzhardinge /**
3084bac07c9SJeremy Fitzhardinge * xenbus_dev_error
3094bac07c9SJeremy Fitzhardinge * @dev: xenbus device
3104bac07c9SJeremy Fitzhardinge * @err: error to report
3114bac07c9SJeremy Fitzhardinge * @fmt: error message format
3124bac07c9SJeremy Fitzhardinge *
3134bac07c9SJeremy Fitzhardinge * Report the given negative errno into the store, along with the given
3144bac07c9SJeremy Fitzhardinge * formatted message.
3154bac07c9SJeremy Fitzhardinge */
xenbus_dev_error(struct xenbus_device * dev,int err,const char * fmt,...)3164bac07c9SJeremy Fitzhardinge void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
3174bac07c9SJeremy Fitzhardinge {
3184bac07c9SJeremy Fitzhardinge va_list ap;
3194bac07c9SJeremy Fitzhardinge
3204bac07c9SJeremy Fitzhardinge va_start(ap, fmt);
3214bac07c9SJeremy Fitzhardinge xenbus_va_dev_error(dev, err, fmt, ap);
3224bac07c9SJeremy Fitzhardinge va_end(ap);
3234bac07c9SJeremy Fitzhardinge }
3244bac07c9SJeremy Fitzhardinge EXPORT_SYMBOL_GPL(xenbus_dev_error);
3254bac07c9SJeremy Fitzhardinge
3264bac07c9SJeremy Fitzhardinge /**
3274bac07c9SJeremy Fitzhardinge * xenbus_dev_fatal
3284bac07c9SJeremy Fitzhardinge * @dev: xenbus device
3294bac07c9SJeremy Fitzhardinge * @err: error to report
3304bac07c9SJeremy Fitzhardinge * @fmt: error message format
3314bac07c9SJeremy Fitzhardinge *
3324bac07c9SJeremy Fitzhardinge * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
333d8220347SQinghuang Feng * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
3344bac07c9SJeremy Fitzhardinge * closedown of this driver and its peer.
3354bac07c9SJeremy Fitzhardinge */
3364bac07c9SJeremy Fitzhardinge
xenbus_dev_fatal(struct xenbus_device * dev,int err,const char * fmt,...)3374bac07c9SJeremy Fitzhardinge void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
3384bac07c9SJeremy Fitzhardinge {
3394bac07c9SJeremy Fitzhardinge va_list ap;
3404bac07c9SJeremy Fitzhardinge
3414bac07c9SJeremy Fitzhardinge va_start(ap, fmt);
3424bac07c9SJeremy Fitzhardinge xenbus_va_dev_error(dev, err, fmt, ap);
3434bac07c9SJeremy Fitzhardinge va_end(ap);
3444bac07c9SJeremy Fitzhardinge
3454bac07c9SJeremy Fitzhardinge xenbus_switch_state(dev, XenbusStateClosing);
3464bac07c9SJeremy Fitzhardinge }
3474bac07c9SJeremy Fitzhardinge EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
3484bac07c9SJeremy Fitzhardinge
3494bac07c9SJeremy Fitzhardinge /**
3505b61cb90SDaniel Stodden * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
3515b61cb90SDaniel Stodden * avoiding recursion within xenbus_switch_state.
3525b61cb90SDaniel Stodden */
xenbus_switch_fatal(struct xenbus_device * dev,int depth,int err,const char * fmt,...)3535b61cb90SDaniel Stodden static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
3545b61cb90SDaniel Stodden const char *fmt, ...)
3555b61cb90SDaniel Stodden {
3565b61cb90SDaniel Stodden va_list ap;
3575b61cb90SDaniel Stodden
3585b61cb90SDaniel Stodden va_start(ap, fmt);
3595b61cb90SDaniel Stodden xenbus_va_dev_error(dev, err, fmt, ap);
3605b61cb90SDaniel Stodden va_end(ap);
3615b61cb90SDaniel Stodden
3625b61cb90SDaniel Stodden if (!depth)
3635b61cb90SDaniel Stodden __xenbus_switch_state(dev, XenbusStateClosing, 1);
3645b61cb90SDaniel Stodden }
3655b61cb90SDaniel Stodden
3667050096dSJuergen Gross /*
3677050096dSJuergen Gross * xenbus_setup_ring
3687050096dSJuergen Gross * @dev: xenbus device
3697050096dSJuergen Gross * @vaddr: pointer to starting virtual address of the ring
3707050096dSJuergen Gross * @nr_pages: number of pages to be granted
3717050096dSJuergen Gross * @grefs: grant reference array to be filled in
3727050096dSJuergen Gross *
3737050096dSJuergen Gross * Allocate physically contiguous pages for a shared ring buffer and grant it
3747050096dSJuergen Gross * to the peer of the given device. The ring buffer is initially filled with
3757050096dSJuergen Gross * zeroes. The virtual address of the ring is stored at @vaddr and the
3767050096dSJuergen Gross * grant references are stored in the @grefs array. In case of error @vaddr
3777050096dSJuergen Gross * will be set to NULL and @grefs will be filled with INVALID_GRANT_REF.
3787050096dSJuergen Gross */
int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
	grant_ref_t gref_head;
	unsigned int i;
	void *addr;
	int ret;

	/* Physically contiguous, zeroed ring memory. */
	addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
	if (!*vaddr) {
		ret = -ENOMEM;
		goto err;
	}

	/* Reserve all grant references up front so the loop cannot fail. */
	ret = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "granting access to %u ring pages",
				 nr_pages);
		goto err;
	}

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		/* Translate page to guest frame number for the grant entry. */
		if (is_vmalloc_addr(*vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
		else
			gfn = virt_to_gfn(addr);

		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		/* Grant the peer read/write access (last arg 0 = writable). */
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);

		addr += XEN_PAGE_SIZE;
	}

	return 0;

err:
	/* Error unwind: free pages (if any) and invalidate all grefs. */
	if (*vaddr)
		free_pages_exact(*vaddr, ring_size);
	for (i = 0; i < nr_pages; i++)
		grefs[i] = INVALID_GRANT_REF;
	*vaddr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_setup_ring);
4287050096dSJuergen Gross
4297050096dSJuergen Gross /*
4307050096dSJuergen Gross * xenbus_teardown_ring
4317050096dSJuergen Gross * @vaddr: starting virtual address of the ring
4327050096dSJuergen Gross * @nr_pages: number of pages
4337050096dSJuergen Gross * @grefs: grant reference array
4347050096dSJuergen Gross *
4357050096dSJuergen Gross * Remove grants for the shared ring buffer and free the associated memory.
4367050096dSJuergen Gross * On return the grant reference array is filled with INVALID_GRANT_REF.
4377050096dSJuergen Gross */
void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
			  grant_ref_t *grefs)
{
	unsigned int i;

	/* Revoke the peer's access to every page that is still granted. */
	for (i = 0; i < nr_pages; i++) {
		if (grefs[i] == INVALID_GRANT_REF)
			continue;

		gnttab_end_foreign_access(grefs[i], NULL);
		grefs[i] = INVALID_GRANT_REF;
	}

	if (*vaddr)
		free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
	*vaddr = NULL;
}
EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
4554bac07c9SJeremy Fitzhardinge
4564bac07c9SJeremy Fitzhardinge /**
4574bac07c9SJeremy Fitzhardinge * Allocate an event channel for the given xenbus_device, assigning the newly
4584bac07c9SJeremy Fitzhardinge * created local port to *port. Return 0 on success, or -errno on error. On
4594bac07c9SJeremy Fitzhardinge * error, the device will switch to XenbusStateClosing, and the error will be
4604bac07c9SJeremy Fitzhardinge * saved in the store.
4614bac07c9SJeremy Fitzhardinge */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	/* Unbound channel between us and the device's peer domain. */
	struct evtchn_alloc_unbound op = {
		.dom        = DOMID_SELF,
		.remote_dom = dev->otherend_id,
	};
	int rc;

	rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
	if (rc)
		xenbus_dev_fatal(dev, rc, "allocating event channel");
	else
		*port = op.port;

	return rc;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
4804bac07c9SJeremy Fitzhardinge
4814bac07c9SJeremy Fitzhardinge
4824bac07c9SJeremy Fitzhardinge /**
4834bac07c9SJeremy Fitzhardinge * Free an existing event channel. Returns 0 on success or -errno on error.
4844bac07c9SJeremy Fitzhardinge */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close op = { .port = port };
	int rc;

	rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &op);
	if (rc)
		xenbus_dev_error(dev, rc, "freeing event channel %u", port);

	return rc;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
4994bac07c9SJeremy Fitzhardinge
5004bac07c9SJeremy Fitzhardinge
5014bac07c9SJeremy Fitzhardinge /**
5024bac07c9SJeremy Fitzhardinge * xenbus_map_ring_valloc
5034bac07c9SJeremy Fitzhardinge * @dev: xenbus device
504ccc9d90aSWei Liu * @gnt_refs: grant reference array
505ccc9d90aSWei Liu * @nr_grefs: number of grant references
5064bac07c9SJeremy Fitzhardinge * @vaddr: pointer to address to be filled out by mapping
5074bac07c9SJeremy Fitzhardinge *
508ccc9d90aSWei Liu * Map @nr_grefs pages of memory into this domain from another
509ccc9d90aSWei Liu * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
510ccc9d90aSWei Liu * pages of virtual address space, maps the pages to that address, and
511578c1bb9SJuergen Gross * sets *vaddr to that address. Returns 0 on success, and -errno on
512ccc9d90aSWei Liu * error. If an error is returned, device will switch to
5134bac07c9SJeremy Fitzhardinge * XenbusStateClosing and the error message will be saved in XenStore.
5144bac07c9SJeremy Fitzhardinge */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	struct map_ring_valloc *info;
	int ret;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	/* Per-call scratch state; too large to live on the stack. */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		ret = -ENOMEM;
	else
		ret = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	/* kfree(NULL) is a no-op; map may have consumed info->node. */
	kfree(info->node);
	kfree(info);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
5412c5d37d3SDaniel De Graaf
/*
 * Map @nr_grefs grant references from @dev's otherend, recording the
 * resulting grant handles in @handles[].  The caller must have filled
 * in info->phys_addrs[] beforehand.
 *
 * N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
 * long), e.g. 32-on-64. Caller is responsible for preparing the
 * right array to feed into this function.
 *
 * Returns 0 on success.  On failure any grants that did get mapped are
 * unmapped again and -ENOENT is returned; if one of those unmaps fails,
 * *@leaked is set to true so the caller knows not to reuse the pages.
 */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	/* Pre-mark every handle invalid so the rollback below can tell
	 * which entries were actually mapped. */
	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return 0;

fail:
	/* Roll back: queue an unmap for every grant that was mapped. */
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));

	/* Any unmap failure means a page stayed mapped: report a leak. */
	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return -ENOENT;
}
600ccc9d90aSWei Liu
601b28089a7SJuergen Gross /**
602b28089a7SJuergen Gross * xenbus_unmap_ring
603b28089a7SJuergen Gross * @dev: xenbus device
604b28089a7SJuergen Gross * @handles: grant handle array
605b28089a7SJuergen Gross * @nr_handles: number of handles in the array
606b28089a7SJuergen Gross * @vaddrs: addresses to unmap
607b28089a7SJuergen Gross *
608b28089a7SJuergen Gross * Unmap memory in this domain that was imported from another domain.
609b28089a7SJuergen Gross * Returns 0 on success and returns GNTST_* on error
610b28089a7SJuergen Gross * (see xen/include/interface/grant_table.h).
611b28089a7SJuergen Gross */
xenbus_unmap_ring(struct xenbus_device * dev,grant_handle_t * handles,unsigned int nr_handles,unsigned long * vaddrs)612b28089a7SJuergen Gross static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
613b28089a7SJuergen Gross unsigned int nr_handles, unsigned long *vaddrs)
614b28089a7SJuergen Gross {
615b28089a7SJuergen Gross struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
616b28089a7SJuergen Gross int i;
617b28089a7SJuergen Gross int err;
618b28089a7SJuergen Gross
619b28089a7SJuergen Gross if (nr_handles > XENBUS_MAX_RING_GRANTS)
620b28089a7SJuergen Gross return -EINVAL;
621b28089a7SJuergen Gross
622b28089a7SJuergen Gross for (i = 0; i < nr_handles; i++)
623b28089a7SJuergen Gross gnttab_set_unmap_op(&unmap[i], vaddrs[i],
624b28089a7SJuergen Gross GNTMAP_host_map, handles[i]);
625b28089a7SJuergen Gross
626bb70913dSJing Yangyang BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));
627b28089a7SJuergen Gross
628b28089a7SJuergen Gross err = GNTST_okay;
629b28089a7SJuergen Gross for (i = 0; i < nr_handles; i++) {
630b28089a7SJuergen Gross if (unmap[i].status != GNTST_okay) {
631b28089a7SJuergen Gross xenbus_dev_error(dev, unmap[i].status,
632b28089a7SJuergen Gross "unmapping page at handle %d error %d",
633b28089a7SJuergen Gross handles[i], unmap[i].status);
634b28089a7SJuergen Gross err = unmap[i].status;
635b28089a7SJuergen Gross break;
636b28089a7SJuergen Gross }
637b28089a7SJuergen Gross }
638b28089a7SJuergen Gross
639b28089a7SJuergen Gross return err;
640b28089a7SJuergen Gross }
641b28089a7SJuergen Gross
xenbus_map_ring_setup_grant_hvm(unsigned long gfn,unsigned int goffset,unsigned int len,void * data)64289bf4b4eSJulien Grall static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
64389bf4b4eSJulien Grall unsigned int goffset,
64489bf4b4eSJulien Grall unsigned int len,
64589bf4b4eSJulien Grall void *data)
64689bf4b4eSJulien Grall {
6473848e4e0SJuergen Gross struct map_ring_valloc *info = data;
64889bf4b4eSJulien Grall unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
64989bf4b4eSJulien Grall
65089bf4b4eSJulien Grall info->phys_addrs[info->idx] = vaddr;
65189bf4b4eSJulien Grall info->addrs[info->idx] = vaddr;
65289bf4b4eSJulien Grall
65389bf4b4eSJulien Grall info->idx++;
65489bf4b4eSJulien Grall }
65589bf4b4eSJulien Grall
xenbus_map_ring_hvm(struct xenbus_device * dev,struct map_ring_valloc * info,grant_ref_t * gnt_ref,unsigned int nr_grefs,void ** vaddr)6563848e4e0SJuergen Gross static int xenbus_map_ring_hvm(struct xenbus_device *dev,
6573848e4e0SJuergen Gross struct map_ring_valloc *info,
658ccc9d90aSWei Liu grant_ref_t *gnt_ref,
659ccc9d90aSWei Liu unsigned int nr_grefs,
660ccc9d90aSWei Liu void **vaddr)
6612c5d37d3SDaniel De Graaf {
6623848e4e0SJuergen Gross struct xenbus_map_node *node = info->node;
6632c5d37d3SDaniel De Graaf int err;
6642c5d37d3SDaniel De Graaf void *addr;
665ccc9d90aSWei Liu bool leaked = false;
66689bf4b4eSJulien Grall unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
667ccc9d90aSWei Liu
6689e2369c0SRoger Pau Monne err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
6692c5d37d3SDaniel De Graaf if (err)
6702c5d37d3SDaniel De Graaf goto out_err;
6712c5d37d3SDaniel De Graaf
67289bf4b4eSJulien Grall gnttab_foreach_grant(node->hvm.pages, nr_grefs,
67389bf4b4eSJulien Grall xenbus_map_ring_setup_grant_hvm,
6743848e4e0SJuergen Gross info);
6752c5d37d3SDaniel De Graaf
676ccc9d90aSWei Liu err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
6773848e4e0SJuergen Gross info, GNTMAP_host_map, &leaked);
678ccc9d90aSWei Liu node->nr_handles = nr_grefs;
679ccc9d90aSWei Liu
6802c5d37d3SDaniel De Graaf if (err)
681ccc9d90aSWei Liu goto out_free_ballooned_pages;
682ccc9d90aSWei Liu
68389bf4b4eSJulien Grall addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
684ccc9d90aSWei Liu PAGE_KERNEL);
685ccc9d90aSWei Liu if (!addr) {
686ccc9d90aSWei Liu err = -ENOMEM;
687ccc9d90aSWei Liu goto out_xenbus_unmap_ring;
688ccc9d90aSWei Liu }
689ccc9d90aSWei Liu
690ccc9d90aSWei Liu node->hvm.addr = addr;
6912c5d37d3SDaniel De Graaf
6922c5d37d3SDaniel De Graaf spin_lock(&xenbus_valloc_lock);
6932c5d37d3SDaniel De Graaf list_add(&node->next, &xenbus_valloc_pages);
6942c5d37d3SDaniel De Graaf spin_unlock(&xenbus_valloc_lock);
6952c5d37d3SDaniel De Graaf
6962c5d37d3SDaniel De Graaf *vaddr = addr;
6973848e4e0SJuergen Gross info->node = NULL;
6983848e4e0SJuergen Gross
6992c5d37d3SDaniel De Graaf return 0;
7002c5d37d3SDaniel De Graaf
701ccc9d90aSWei Liu out_xenbus_unmap_ring:
702ccc9d90aSWei Liu if (!leaked)
7033848e4e0SJuergen Gross xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
704ccc9d90aSWei Liu else
705ccc9d90aSWei Liu pr_alert("leaking %p size %u page(s)",
70689bf4b4eSJulien Grall addr, nr_pages);
707ccc9d90aSWei Liu out_free_ballooned_pages:
708ccc9d90aSWei Liu if (!leaked)
7099e2369c0SRoger Pau Monne xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
7108d0b8801SWei Liu out_err:
7112c5d37d3SDaniel De Graaf return err;
7122c5d37d3SDaniel De Graaf }
7134bac07c9SJeremy Fitzhardinge
/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	/* Dispatch to the PV or HVM implementation chosen at init. */
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
7312c5d37d3SDaniel De Graaf
732fe9c1c95SJuergen Gross #ifdef CONFIG_XEN_PV
/*
 * apply_to_page_range() callback: stash the machine address of each PTE
 * of the freshly reserved VM area so __xenbus_map_ring() can map the
 * grants with GNTMAP_contains_pte.
 */
static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
{
	struct map_ring_valloc *info = data;
	unsigned int slot = info->idx++;

	info->phys_addrs[slot] = arbitrary_virt_to_machine(pte).maddr;
	return 0;
}
740b723caecSChristoph Hellwig
/*
 * PV map implementation: reserve a kernel VM area and have the
 * hypervisor map the grants directly into its PTEs.  On success the
 * tracking node moves onto xenbus_valloc_pages and ownership is taken
 * from @info (info->node is cleared).
 */
static int xenbus_map_ring_pv(struct xenbus_device *dev,
			      struct map_ring_valloc *info,
			      grant_ref_t *gnt_refs,
			      unsigned int nr_grefs,
			      void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	bool leaked = false;
	int err = -ENOMEM;

	area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
	if (!area)
		return -ENOMEM;
	/* Collect the machine addresses of the area's PTEs into info. */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
		goto failed;
	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	/* Node now owned by the xenbus_valloc_pages list. */
	info->node = NULL;

	return 0;

failed:
	/* A leaked grant means the VM area is still referenced and must
	 * not be freed. */
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	return err;
}
784fe9c1c95SJuergen Gross
xenbus_unmap_ring_pv(struct xenbus_device * dev,void * vaddr)7853848e4e0SJuergen Gross static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
7862c5d37d3SDaniel De Graaf {
7872c5d37d3SDaniel De Graaf struct xenbus_map_node *node;
7889cce2914SJulien Grall struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
789cd12909cSDavid Vrabel unsigned int level;
790ccc9d90aSWei Liu int i;
791ccc9d90aSWei Liu bool leaked = false;
792ccc9d90aSWei Liu int err;
7934bac07c9SJeremy Fitzhardinge
7942c5d37d3SDaniel De Graaf spin_lock(&xenbus_valloc_lock);
7952c5d37d3SDaniel De Graaf list_for_each_entry(node, &xenbus_valloc_pages, next) {
796ccc9d90aSWei Liu if (node->pv.area->addr == vaddr) {
7972c5d37d3SDaniel De Graaf list_del(&node->next);
7982c5d37d3SDaniel De Graaf goto found;
7994bac07c9SJeremy Fitzhardinge }
8002c5d37d3SDaniel De Graaf }
8012c5d37d3SDaniel De Graaf node = NULL;
8022c5d37d3SDaniel De Graaf found:
8032c5d37d3SDaniel De Graaf spin_unlock(&xenbus_valloc_lock);
8044bac07c9SJeremy Fitzhardinge
8052c5d37d3SDaniel De Graaf if (!node) {
8064bac07c9SJeremy Fitzhardinge xenbus_dev_error(dev, -ENOENT,
8074bac07c9SJeremy Fitzhardinge "can't find mapped virtual address %p", vaddr);
8084bac07c9SJeremy Fitzhardinge return GNTST_bad_virt_addr;
8094bac07c9SJeremy Fitzhardinge }
8104bac07c9SJeremy Fitzhardinge
811ccc9d90aSWei Liu for (i = 0; i < node->nr_handles; i++) {
812ccc9d90aSWei Liu unsigned long addr;
8134bac07c9SJeremy Fitzhardinge
814ccc9d90aSWei Liu memset(&unmap[i], 0, sizeof(unmap[i]));
8157d567928SJulien Grall addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
816ccc9d90aSWei Liu unmap[i].host_addr = arbitrary_virt_to_machine(
817ccc9d90aSWei Liu lookup_address(addr, &level)).maddr;
818ccc9d90aSWei Liu unmap[i].dev_bus_addr = 0;
819ccc9d90aSWei Liu unmap[i].handle = node->handles[i];
820ccc9d90aSWei Liu }
821ccc9d90aSWei Liu
822bb70913dSJing Yangyang BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));
8234bac07c9SJeremy Fitzhardinge
824ccc9d90aSWei Liu err = GNTST_okay;
825ccc9d90aSWei Liu leaked = false;
826ccc9d90aSWei Liu for (i = 0; i < node->nr_handles; i++) {
827ccc9d90aSWei Liu if (unmap[i].status != GNTST_okay) {
828ccc9d90aSWei Liu leaked = true;
829ccc9d90aSWei Liu xenbus_dev_error(dev, unmap[i].status,
8304bac07c9SJeremy Fitzhardinge "unmapping page at handle %d error %d",
831ccc9d90aSWei Liu node->handles[i], unmap[i].status);
832ccc9d90aSWei Liu err = unmap[i].status;
833ccc9d90aSWei Liu break;
834ccc9d90aSWei Liu }
835ccc9d90aSWei Liu }
836ccc9d90aSWei Liu
837ccc9d90aSWei Liu if (!leaked)
838ccc9d90aSWei Liu free_vm_area(node->pv.area);
839ccc9d90aSWei Liu else
840ccc9d90aSWei Liu pr_alert("leaking VM area %p size %u page(s)",
841ccc9d90aSWei Liu node->pv.area, node->nr_handles);
8424bac07c9SJeremy Fitzhardinge
8432c5d37d3SDaniel De Graaf kfree(node);
844ccc9d90aSWei Liu return err;
8454bac07c9SJeremy Fitzhardinge }
8464bac07c9SJeremy Fitzhardinge
/* Ring ops used for PV guests (selected in xenbus_ring_ops_init()). */
static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
851fe9c1c95SJuergen Gross #endif
852fe9c1c95SJuergen Gross
/* Scratch state for collecting page addresses during an HVM unmap. */
struct unmap_ring_hvm
{
	unsigned int idx;	/* next free slot in addrs[] */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};
85889bf4b4eSJulien Grall
xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,unsigned int goffset,unsigned int len,void * data)85989bf4b4eSJulien Grall static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
86089bf4b4eSJulien Grall unsigned int goffset,
86189bf4b4eSJulien Grall unsigned int len,
86289bf4b4eSJulien Grall void *data)
86389bf4b4eSJulien Grall {
8643848e4e0SJuergen Gross struct unmap_ring_hvm *info = data;
86589bf4b4eSJulien Grall
86689bf4b4eSJulien Grall info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
86789bf4b4eSJulien Grall
86889bf4b4eSJulien Grall info->idx++;
86989bf4b4eSJulien Grall }
87089bf4b4eSJulien Grall
/*
 * HVM counterpart of xenbus_unmap_ring_vfree(): unmap the grants,
 * vunmap() the range and release the unpopulated pages.
 * Returns GNTST_okay (0) on success, GNTST_bad_virt_addr if @vaddr is
 * not a tracked mapping, or the failing unmap's GNTST_* status.
 */
static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	/* Find and unlink the tracking node for this mapping. */
	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	/* Gather the virtual addresses backing the mapping into info. */
	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	/* Only release resources when every grant really was unmapped. */
	if (!rv) {
		vunmap(vaddr);
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
	}
	else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}
9174bac07c9SJeremy Fitzhardinge
9184bac07c9SJeremy Fitzhardinge /**
9194bac07c9SJeremy Fitzhardinge * xenbus_read_driver_state
9204bac07c9SJeremy Fitzhardinge * @path: path for driver
9214bac07c9SJeremy Fitzhardinge *
9224bac07c9SJeremy Fitzhardinge * Return the state of the driver rooted at the given store path, or
9234bac07c9SJeremy Fitzhardinge * XenbusStateUnknown if no state can be read.
9244bac07c9SJeremy Fitzhardinge */
xenbus_read_driver_state(const char * path)9254bac07c9SJeremy Fitzhardinge enum xenbus_state xenbus_read_driver_state(const char *path)
9264bac07c9SJeremy Fitzhardinge {
9274bac07c9SJeremy Fitzhardinge enum xenbus_state result;
9284bac07c9SJeremy Fitzhardinge int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
9294bac07c9SJeremy Fitzhardinge if (err)
9304bac07c9SJeremy Fitzhardinge result = XenbusStateUnknown;
9314bac07c9SJeremy Fitzhardinge
9324bac07c9SJeremy Fitzhardinge return result;
9334bac07c9SJeremy Fitzhardinge }
9344bac07c9SJeremy Fitzhardinge EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
9352c5d37d3SDaniel De Graaf
/* Ring ops used for HVM / auto-translated guests. */
static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};
9402c5d37d3SDaniel De Graaf
xenbus_ring_ops_init(void)9412c5d37d3SDaniel De Graaf void __init xenbus_ring_ops_init(void)
9422c5d37d3SDaniel De Graaf {
943fe9c1c95SJuergen Gross #ifdef CONFIG_XEN_PV
944be3e9cf3SMukesh Rathor if (!xen_feature(XENFEAT_auto_translated_physmap))
9452c5d37d3SDaniel De Graaf ring_ops = &ring_ops_pv;
9462c5d37d3SDaniel De Graaf else
947fe9c1c95SJuergen Gross #endif
9482c5d37d3SDaniel De Graaf ring_ops = &ring_ops_hvm;
9492c5d37d3SDaniel De Graaf }
950