1 /******************************************************************************
2  * Talks to Xen Store to figure out what devices we have.
3  *
4  * Copyright (C) 2005 Rusty Russell, IBM Corporation
5  * Copyright (C) 2005 Mike Wray, Hewlett-Packard
6  * Copyright (C) 2005, 2006 XenSource Ltd
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version 2
10  * as published by the Free Software Foundation; or, when distributed
11  * separately from the Linux kernel or incorporated into other
12  * software packages, subject to the following license:
13  *
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this source file (the "Software"), to deal in the Software without
16  * restriction, including without limitation the rights to use, copy, modify,
17  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18  * and to permit persons to whom the Software is furnished to do so, subject to
19  * the following conditions:
20  *
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30  * IN THE SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #define dev_fmt pr_fmt
35 
36 #define DPRINTK(fmt, args...)				\
37 	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
38 		 __func__, __LINE__, ##args)
39 
40 #include <linux/kernel.h>
41 #include <linux/err.h>
42 #include <linux/string.h>
43 #include <linux/ctype.h>
44 #include <linux/fcntl.h>
45 #include <linux/mm.h>
46 #include <linux/proc_fs.h>
47 #include <linux/notifier.h>
48 #include <linux/kthread.h>
49 #include <linux/mutex.h>
50 #include <linux/io.h>
51 #include <linux/slab.h>
52 #include <linux/module.h>
53 
54 #include <asm/page.h>
55 #include <asm/xen/hypervisor.h>
56 
57 #include <xen/xen.h>
58 #include <xen/xenbus.h>
59 #include <xen/events.h>
60 #include <xen/xen-ops.h>
61 #include <xen/page.h>
62 
63 #include <xen/hvm.h>
64 
65 #include "xenbus.h"
66 
67 
68 static int xs_init_irq = -1;
69 int xen_store_evtchn;
70 EXPORT_SYMBOL_GPL(xen_store_evtchn);
71 
72 struct xenstore_domain_interface *xen_store_interface;
73 EXPORT_SYMBOL_GPL(xen_store_interface);
74 
75 #define XS_INTERFACE_READY \
76 	((xen_store_interface != NULL) && \
77 	 (xen_store_interface->connection == XENSTORE_CONNECTED))
78 
79 enum xenstore_init xen_store_domain_type;
80 EXPORT_SYMBOL_GPL(xen_store_domain_type);
81 
82 static unsigned long xen_store_gfn;
83 
84 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
85 
/* If something in the array of ids matches this device, return it. */
87 static const struct xenbus_device_id *
88 match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
89 {
90 	for (; *arr->devicetype != '\0'; arr++) {
91 		if (!strcmp(arr->devicetype, dev->devicetype))
92 			return arr;
93 	}
94 	return NULL;
95 }
96 
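/*
 * Bus .match callback: a driver matches a device when the device's
 * devicetype appears in the driver's id table.
 */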
97 int xenbus_match(struct device *_dev, struct device_driver *_drv)
98 {
99 	struct xenbus_driver *drv = to_xenbus_driver(_drv);
100 
101 	if (!drv->ids)
102 		return 0;
103 
104 	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
105 }
106 EXPORT_SYMBOL_GPL(xenbus_match);
107 
108 
109 static void free_otherend_details(struct xenbus_device *dev)
110 {
111 	kfree(dev->otherend);
112 	dev->otherend = NULL;
113 }
114 
115 
116 static void free_otherend_watch(struct xenbus_device *dev)
117 {
118 	if (dev->otherend_watch.node) {
119 		unregister_xenbus_watch(&dev->otherend_watch);
120 		kfree(dev->otherend_watch.node);
121 		dev->otherend_watch.node = NULL;
122 	}
123 }
124 
125 
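/*
 * Drop any stale details and watch for the other end, then re-read the
 * other end's id and path via the driver's read_otherend_details() hook.
 */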
126 static int talk_to_otherend(struct xenbus_device *dev)
127 {
128 	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
129 
130 	free_otherend_watch(dev);
131 	free_otherend_details(dev);
132 
133 	return drv->read_otherend_details(dev);
134 }
135 
136 
137 
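/*
 * Watch the other end's "state" node so that the bus's otherend_changed
 * handler runs whenever the peer changes its xenbus state.
 */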
138 static int watch_otherend(struct xenbus_device *dev)
139 {
140 	struct xen_bus_type *bus =
141 		container_of(dev->dev.bus, struct xen_bus_type, bus);
142 
143 	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
144 				    bus->otherend_will_handle,
145 				    bus->otherend_changed,
146 				    "%s/%s", dev->otherend, "state");
147 }
148 
149 
150 int xenbus_read_otherend_details(struct xenbus_device *xendev,
151 				 char *id_node, char *path_node)
152 {
153 	int err = xenbus_gather(XBT_NIL, xendev->nodename,
154 				id_node, "%i", &xendev->otherend_id,
155 				path_node, NULL, &xendev->otherend,
156 				NULL);
157 	if (err) {
158 		xenbus_dev_fatal(xendev, err,
159 				 "reading other end details from %s",
160 				 xendev->nodename);
161 		return err;
162 	}
163 	if (strlen(xendev->otherend) == 0 ||
164 	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
165 		xenbus_dev_fatal(xendev, -ENOENT,
166 				 "unable to read other end from %s.  "
167 				 "missing or inaccessible.",
168 				 xendev->nodename);
169 		free_otherend_details(xendev);
170 		return -ENOENT;
171 	}
172 
173 	return 0;
174 }
175 EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
176 
177 void xenbus_otherend_changed(struct xenbus_watch *watch,
178 			     const char *path, const char *token,
179 			     int ignore_on_shutdown)
180 {
181 	struct xenbus_device *dev =
182 		container_of(watch, struct xenbus_device, otherend_watch);
183 	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
184 	enum xenbus_state state;
185 
	/*
	 * Protect us against watches firing on old details when the otherend
	 * details change, say immediately after a resume.
	 */
188 	if (!dev->otherend ||
189 	    strncmp(dev->otherend, path, strlen(dev->otherend))) {
190 		dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
191 		return;
192 	}
193 
194 	state = xenbus_read_driver_state(dev->otherend);
195 
196 	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
197 		state, xenbus_strstate(state), dev->otherend_watch.node, path);
198 
199 	/*
200 	 * Ignore xenbus transitions during shutdown. This prevents us doing
201 	 * work that can fail e.g., when the rootfs is gone.
202 	 */
203 	if (system_state > SYSTEM_RUNNING) {
204 		if (ignore_on_shutdown && (state == XenbusStateClosing))
205 			xenbus_frontend_closed(dev);
206 		return;
207 	}
208 
209 	if (drv->otherend_changed)
210 		drv->otherend_changed(dev, state);
211 }
212 EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
213 
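/*
 * Generate a read-only sysfs show function for one of the per-device
 * atomic event counters below.
 */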
214 #define XENBUS_SHOW_STAT(name)						\
215 static ssize_t name##_show(struct device *_dev,				\
216 			   struct device_attribute *attr,		\
217 			   char *buf)					\
218 {									\
219 	struct xenbus_device *dev = to_xenbus_device(_dev);		\
220 									\
221 	return sprintf(buf, "%d\n", atomic_read(&dev->name));		\
222 }									\
223 static DEVICE_ATTR_RO(name)
224 
225 XENBUS_SHOW_STAT(event_channels);
226 XENBUS_SHOW_STAT(events);
227 XENBUS_SHOW_STAT(spurious_events);
228 XENBUS_SHOW_STAT(jiffies_eoi_delayed);
229 
230 static ssize_t spurious_threshold_show(struct device *_dev,
231 				       struct device_attribute *attr,
232 				       char *buf)
233 {
234 	struct xenbus_device *dev = to_xenbus_device(_dev);
235 
236 	return sprintf(buf, "%d\n", dev->spurious_threshold);
237 }
238 
239 static ssize_t spurious_threshold_store(struct device *_dev,
240 					struct device_attribute *attr,
241 					const char *buf, size_t count)
242 {
243 	struct xenbus_device *dev = to_xenbus_device(_dev);
244 	unsigned int val;
245 	ssize_t ret;
246 
247 	ret = kstrtouint(buf, 0, &val);
248 	if (ret)
249 		return ret;
250 
251 	dev->spurious_threshold = val;
252 
253 	return count;
254 }
255 
256 static DEVICE_ATTR_RW(spurious_threshold);
257 
258 static struct attribute *xenbus_attrs[] = {
259 	&dev_attr_event_channels.attr,
260 	&dev_attr_events.attr,
261 	&dev_attr_spurious_events.attr,
262 	&dev_attr_jiffies_eoi_delayed.attr,
263 	&dev_attr_spurious_threshold.attr,
264 	NULL
265 };
266 
267 static const struct attribute_group xenbus_group = {
268 	.name = "xenbus",
269 	.attrs = xenbus_attrs,
270 };
271 
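/*
 * Bus .probe callback: match the device against the driver's id table,
 * read the other end's details, then call the driver's probe routine
 * under reclaim_sem with a module reference held.  On success, set up
 * the otherend state watch and the per-device sysfs statistics group.
 */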
272 int xenbus_dev_probe(struct device *_dev)
273 {
274 	struct xenbus_device *dev = to_xenbus_device(_dev);
275 	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
276 	const struct xenbus_device_id *id;
277 	int err;
278 
279 	DPRINTK("%s", dev->nodename);
280 
281 	if (!drv->probe) {
282 		err = -ENODEV;
283 		goto fail;
284 	}
285 
286 	id = match_device(drv->ids, dev);
287 	if (!id) {
288 		err = -ENODEV;
289 		goto fail;
290 	}
291 
292 	err = talk_to_otherend(dev);
293 	if (err) {
294 		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
295 			 dev->nodename);
296 		return err;
297 	}
298 
299 	if (!try_module_get(drv->driver.owner)) {
300 		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
301 			 drv->driver.name);
302 		err = -ESRCH;
303 		goto fail;
304 	}
305 
306 	down(&dev->reclaim_sem);
307 	err = drv->probe(dev, id);
308 	up(&dev->reclaim_sem);
309 	if (err)
310 		goto fail_put;
311 
312 	err = watch_otherend(dev);
313 	if (err) {
314 		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
315 		       dev->nodename);
316 		goto fail_remove;
317 	}
318 
319 	dev->spurious_threshold = 1;
320 	if (sysfs_create_group(&dev->dev.kobj, &xenbus_group))
321 		dev_warn(&dev->dev, "sysfs_create_group on %s failed.\n",
322 			 dev->nodename);
323 
324 	return 0;
325 fail_remove:
326 	if (drv->remove) {
327 		down(&dev->reclaim_sem);
328 		drv->remove(dev);
329 		up(&dev->reclaim_sem);
330 	}
331 fail_put:
332 	module_put(drv->driver.owner);
333 fail:
334 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
335 	return err;
336 }
337 EXPORT_SYMBOL_GPL(xenbus_dev_probe);
338 
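/*
 * Bus .remove callback: tear down the sysfs statistics group and the
 * otherend watch, call the driver's remove routine under reclaim_sem,
 * free the otherend details, and finally switch the device to Closed
 * unless the driver allows re-bind and the toolstack has not forced the
 * state to Closing.
 */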
339 void xenbus_dev_remove(struct device *_dev)
340 {
341 	struct xenbus_device *dev = to_xenbus_device(_dev);
342 	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
343 
344 	DPRINTK("%s", dev->nodename);
345 
346 	sysfs_remove_group(&dev->dev.kobj, &xenbus_group);
347 
348 	free_otherend_watch(dev);
349 
350 	if (drv->remove) {
351 		down(&dev->reclaim_sem);
352 		drv->remove(dev);
353 		up(&dev->reclaim_sem);
354 	}
355 
356 	module_put(drv->driver.owner);
357 
358 	free_otherend_details(dev);
359 
360 	/*
361 	 * If the toolstack has forced the device state to closing then set
362 	 * the state to closed now to allow it to be cleaned up.
	 * Similarly, if the driver does not support re-bind, set the
	 * state to closed.
365 	 */
366 	if (!drv->allow_rebind ||
367 	    xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
368 		xenbus_switch_state(dev, XenbusStateClosed);
369 }
370 EXPORT_SYMBOL_GPL(xenbus_dev_remove);
371 
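/*
 * Common tail of frontend/backend driver registration: fill in the
 * generic driver fields and register with the driver core.
 */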
372 int xenbus_register_driver_common(struct xenbus_driver *drv,
373 				  struct xen_bus_type *bus,
374 				  struct module *owner, const char *mod_name)
375 {
376 	drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
377 	drv->driver.bus = &bus->bus;
378 	drv->driver.owner = owner;
379 	drv->driver.mod_name = mod_name;
380 
381 	return driver_register(&drv->driver);
382 }
383 EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
384 
385 void xenbus_unregister_driver(struct xenbus_driver *drv)
386 {
387 	driver_unregister(&drv->driver);
388 }
389 EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
390 
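/*
 * Helpers for finding and cleaning up xenbus devices by xenstore node
 * name via bus_for_each_dev().
 */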
391 struct xb_find_info {
392 	struct xenbus_device *dev;
393 	const char *nodename;
394 };
395 
396 static int cmp_dev(struct device *dev, void *data)
397 {
398 	struct xenbus_device *xendev = to_xenbus_device(dev);
399 	struct xb_find_info *info = data;
400 
401 	if (!strcmp(xendev->nodename, info->nodename)) {
402 		info->dev = xendev;
403 		get_device(dev);
404 		return 1;
405 	}
406 	return 0;
407 }
408 
409 static struct xenbus_device *xenbus_device_find(const char *nodename,
410 						struct bus_type *bus)
411 {
412 	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
413 
414 	bus_for_each_dev(bus, NULL, &info, cmp_dev);
415 	return info.dev;
416 }
417 
418 static int cleanup_dev(struct device *dev, void *data)
419 {
420 	struct xenbus_device *xendev = to_xenbus_device(dev);
421 	struct xb_find_info *info = data;
422 	int len = strlen(info->nodename);
423 
424 	DPRINTK("%s", info->nodename);
425 
426 	/* Match the info->nodename path, or any subdirectory of that path. */
427 	if (strncmp(xendev->nodename, info->nodename, len))
428 		return 0;
429 
430 	/* If the node name is longer, ensure it really is a subdirectory. */
431 	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
432 		return 0;
433 
434 	info->dev = xendev;
435 	get_device(dev);
436 	return 1;
437 }
438 
439 static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
440 {
441 	struct xb_find_info info = { .nodename = path };
442 
443 	do {
444 		info.dev = NULL;
445 		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
446 		if (info.dev) {
447 			device_unregister(&info.dev->dev);
448 			put_device(&info.dev->dev);
449 		}
450 	} while (info.dev);
451 }
452 
453 static void xenbus_dev_release(struct device *dev)
454 {
455 	if (dev)
456 		kfree(to_xenbus_device(dev));
457 }
458 
459 static ssize_t nodename_show(struct device *dev,
460 			     struct device_attribute *attr, char *buf)
461 {
462 	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
463 }
464 static DEVICE_ATTR_RO(nodename);
465 
466 static ssize_t devtype_show(struct device *dev,
467 			    struct device_attribute *attr, char *buf)
468 {
469 	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
470 }
471 static DEVICE_ATTR_RO(devtype);
472 
473 static ssize_t modalias_show(struct device *dev,
474 			     struct device_attribute *attr, char *buf)
475 {
476 	return sprintf(buf, "%s:%s\n", dev->bus->name,
477 		       to_xenbus_device(dev)->devicetype);
478 }
479 static DEVICE_ATTR_RO(modalias);
480 
481 static ssize_t state_show(struct device *dev,
482 			    struct device_attribute *attr, char *buf)
483 {
484 	return sprintf(buf, "%s\n",
485 			xenbus_strstate(to_xenbus_device(dev)->state));
486 }
487 static DEVICE_ATTR_RO(state);
488 
489 static struct attribute *xenbus_dev_attrs[] = {
490 	&dev_attr_nodename.attr,
491 	&dev_attr_devtype.attr,
492 	&dev_attr_modalias.attr,
493 	&dev_attr_state.attr,
494 	NULL,
495 };
496 
497 static const struct attribute_group xenbus_dev_group = {
498 	.attrs = xenbus_dev_attrs,
499 };
500 
501 const struct attribute_group *xenbus_dev_groups[] = {
502 	&xenbus_dev_group,
503 	NULL,
504 };
505 EXPORT_SYMBOL_GPL(xenbus_dev_groups);
506 
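/*
 * Create and register a xenbus_device for @nodename of the given @type,
 * provided the node is still in the Initialising state.  The nodename
 * and devicetype strings are stored in the same allocation as the
 * device structure.
 */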
507 int xenbus_probe_node(struct xen_bus_type *bus,
508 		      const char *type,
509 		      const char *nodename)
510 {
511 	char devname[XEN_BUS_ID_SIZE];
512 	int err;
513 	struct xenbus_device *xendev;
514 	size_t stringlen;
515 	char *tmpstring;
516 
517 	enum xenbus_state state = xenbus_read_driver_state(nodename);
518 
519 	if (state != XenbusStateInitialising) {
		/*
		 * Device is not new, so ignore it.  This can happen if a
		 * device is going away after switching to Closed.
		 */
522 		return 0;
523 	}
524 
525 	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
526 	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
527 	if (!xendev)
528 		return -ENOMEM;
529 
530 	xendev->state = XenbusStateInitialising;
531 
532 	/* Copy the strings into the extra space. */
533 
534 	tmpstring = (char *)(xendev + 1);
535 	strcpy(tmpstring, nodename);
536 	xendev->nodename = tmpstring;
537 
538 	tmpstring += strlen(tmpstring) + 1;
539 	strcpy(tmpstring, type);
540 	xendev->devicetype = tmpstring;
541 	init_completion(&xendev->down);
542 
543 	xendev->dev.bus = &bus->bus;
544 	xendev->dev.release = xenbus_dev_release;
545 
546 	err = bus->get_bus_id(devname, xendev->nodename);
547 	if (err)
548 		goto fail;
549 
550 	dev_set_name(&xendev->dev, "%s", devname);
551 	sema_init(&xendev->reclaim_sem, 1);
552 
553 	/* Register with generic device framework. */
554 	err = device_register(&xendev->dev);
555 	if (err) {
556 		put_device(&xendev->dev);
557 		xendev = NULL;
558 		goto fail;
559 	}
560 
561 	return 0;
562 fail:
563 	kfree(xendev);
564 	return err;
565 }
566 EXPORT_SYMBOL_GPL(xenbus_probe_node);
567 
568 static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
569 {
570 	int err = 0;
571 	char **dir;
572 	unsigned int dir_n = 0;
573 	int i;
574 
575 	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
576 	if (IS_ERR(dir))
577 		return PTR_ERR(dir);
578 
579 	for (i = 0; i < dir_n; i++) {
580 		err = bus->probe(bus, type, dir[i]);
581 		if (err)
582 			break;
583 	}
584 
585 	kfree(dir);
586 	return err;
587 }
588 
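/*
 * Enumerate all device types under bus->root and probe every device
 * node found beneath them.
 */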
589 int xenbus_probe_devices(struct xen_bus_type *bus)
590 {
591 	int err = 0;
592 	char **dir;
593 	unsigned int i, dir_n;
594 
595 	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
596 	if (IS_ERR(dir))
597 		return PTR_ERR(dir);
598 
599 	for (i = 0; i < dir_n; i++) {
600 		err = xenbus_probe_device_type(bus, dir[i]);
601 		if (err)
602 			break;
603 	}
604 
605 	kfree(dir);
606 	return err;
607 }
608 EXPORT_SYMBOL_GPL(xenbus_probe_devices);
609 
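/* Count the occurrences of character @c in @str. */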
610 static unsigned int char_count(const char *str, char c)
611 {
612 	unsigned int i, ret = 0;
613 
614 	for (i = 0; str[i]; i++)
615 		if (str[i] == c)
616 			ret++;
617 	return ret;
618 }
619 
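/*
 * Return the offset of the (@len + 1)th occurrence of @c in @str, the
 * string length if @str contains exactly @len occurrences, or -ERANGE
 * if it contains fewer.  Used by xenbus_dev_changed() to truncate a
 * node path to the device root.
 */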
620 static int strsep_len(const char *str, char c, unsigned int len)
621 {
622 	unsigned int i;
623 
624 	for (i = 0; str[i]; i++)
625 		if (str[i] == c) {
626 			if (len == 0)
627 				return i;
628 			len--;
629 		}
630 	return (len == 0) ? i : -ERANGE;
631 }
632 
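/*
 * Handle a xenstore change below bus->root.  If @node has disappeared,
 * unregister any devices under it; otherwise derive the device type and
 * root path from @node and probe the device if it is not already known.
 */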
633 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
634 {
635 	int exists, rootlen;
636 	struct xenbus_device *dev;
637 	char type[XEN_BUS_ID_SIZE];
638 	const char *p, *root;
639 
640 	if (char_count(node, '/') < 2)
641 		return;
642 
643 	exists = xenbus_exists(XBT_NIL, node, "");
644 	if (!exists) {
645 		xenbus_cleanup_devices(node, &bus->bus);
646 		return;
647 	}
648 
649 	/* backend/<type>/... or device/<type>/... */
650 	p = strchr(node, '/') + 1;
651 	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
652 	type[XEN_BUS_ID_SIZE-1] = '\0';
653 
654 	rootlen = strsep_len(node, '/', bus->levels);
655 	if (rootlen < 0)
656 		return;
657 	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
658 	if (!root)
659 		return;
660 
661 	dev = xenbus_device_find(root, &bus->bus);
662 	if (!dev)
663 		xenbus_probe_node(bus, type, root);
664 	else
665 		put_device(&dev->dev);
666 
667 	kfree(root);
668 }
669 EXPORT_SYMBOL_GPL(xenbus_dev_changed);
670 
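/*
 * PM suspend callback: give the driver a chance to quiesce the device.
 * A failure is only logged so that the suspend can proceed.
 */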
671 int xenbus_dev_suspend(struct device *dev)
672 {
673 	int err = 0;
674 	struct xenbus_driver *drv;
675 	struct xenbus_device *xdev
676 		= container_of(dev, struct xenbus_device, dev);
677 
678 	DPRINTK("%s", xdev->nodename);
679 
680 	if (dev->driver == NULL)
681 		return 0;
682 	drv = to_xenbus_driver(dev->driver);
683 	if (drv->suspend)
684 		err = drv->suspend(xdev);
685 	if (err)
686 		dev_warn(dev, "suspend failed: %i\n", err);
687 	return 0;
688 }
689 EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
690 
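/*
 * PM resume callback: re-read the other end's details, reset the device
 * state to Initialising, call the driver's resume routine and then
 * re-establish the otherend state watch.
 */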
691 int xenbus_dev_resume(struct device *dev)
692 {
693 	int err;
694 	struct xenbus_driver *drv;
695 	struct xenbus_device *xdev
696 		= container_of(dev, struct xenbus_device, dev);
697 
698 	DPRINTK("%s", xdev->nodename);
699 
700 	if (dev->driver == NULL)
701 		return 0;
702 	drv = to_xenbus_driver(dev->driver);
703 	err = talk_to_otherend(xdev);
704 	if (err) {
705 		dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err);
706 		return err;
707 	}
708 
709 	xdev->state = XenbusStateInitialising;
710 
711 	if (drv->resume) {
712 		err = drv->resume(xdev);
713 		if (err) {
714 			dev_warn(dev, "resume failed: %i\n", err);
715 			return err;
716 		}
717 	}
718 
719 	err = watch_otherend(xdev);
720 	if (err) {
721 		dev_warn(dev, "resume (watch_otherend) failed: %d\n", err);
722 		return err;
723 	}
724 
725 	return 0;
726 }
727 EXPORT_SYMBOL_GPL(xenbus_dev_resume);
728 
729 int xenbus_dev_cancel(struct device *dev)
730 {
731 	/* Do nothing */
732 	DPRINTK("cancel");
733 	return 0;
734 }
735 EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
736 
737 /* A flag to determine if xenstored is 'ready' (i.e. has started) */
738 int xenstored_ready;
739 
740 
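/*
 * Call @nb immediately if xenstored is already up, otherwise queue it
 * on xenstore_chain to be run from xenbus_probe() once xenstore becomes
 * available.
 *
 * Illustrative sketch only (my_ready/my_nb are made-up names, not part
 * of this file):
 *
 *	static int my_ready(struct notifier_block *nb, unsigned long event,
 *			    void *data)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_ready };
 *
 *	register_xenstore_notifier(&my_nb);
 */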
741 int register_xenstore_notifier(struct notifier_block *nb)
742 {
743 	int ret = 0;
744 
745 	if (xenstored_ready > 0)
746 		ret = nb->notifier_call(nb, 0, NULL);
747 	else
748 		blocking_notifier_chain_register(&xenstore_chain, nb);
749 
750 	return ret;
751 }
752 EXPORT_SYMBOL_GPL(register_xenstore_notifier);
753 
754 void unregister_xenstore_notifier(struct notifier_block *nb)
755 {
756 	blocking_notifier_chain_unregister(&xenstore_chain, nb);
757 }
758 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
759 
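/*
 * Finish xenstore bring-up: mark xenstored ready, map the xenstore ring
 * if that has not happened yet, release the late-init IRQ, run the
 * deferred xs_init() in the HVM case and notify everyone waiting on
 * xenstore_chain.
 */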
760 static void xenbus_probe(void)
761 {
762 	xenstored_ready = 1;
763 
764 	if (!xen_store_interface)
765 		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
766 					       XEN_PAGE_SIZE, MEMREMAP_WB);
767 	/*
768 	 * Now it is safe to free the IRQ used for xenstore late
769 	 * initialization. No need to unbind: it is about to be
770 	 * bound again from xb_init_comms. Note that calling
771 	 * unbind_from_irqhandler now would result in xen_evtchn_close()
772 	 * being called and the event channel not being enabled again
773 	 * afterwards, resulting in missed event notifications.
774 	 */
775 	if (xs_init_irq >= 0)
776 		free_irq(xs_init_irq, &xb_waitq);
777 
778 	/*
779 	 * In the HVM case, xenbus_init() deferred its call to
780 	 * xs_init() in case callbacks were not operational yet.
781 	 * So do it now.
782 	 */
783 	if (xen_store_domain_type == XS_HVM)
784 		xs_init();
785 
786 	/* Notify others that xenstore is up */
787 	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
788 }
789 
790 /*
791  * Returns true when XenStore init must be deferred in order to
 * allow the PCI platform device to be initialised before we can
 * actually have event channel interrupts working.
794  */
795 static bool xs_hvm_defer_init_for_callback(void)
796 {
797 #ifdef CONFIG_XEN_PVHVM
798 	return xen_store_domain_type == XS_HVM &&
799 		!xen_have_vector_callback;
800 #else
801 	return false;
802 #endif
803 }
804 
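/*
 * Kthread used when xenstored (or a xenstore-stubdom) starts after this
 * kernel: sleep until the first wakeup of xb_waitq, then run
 * xenbus_probe().
 */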
805 static int xenbus_probe_thread(void *unused)
806 {
807 	DEFINE_WAIT(w);
808 
809 	/*
810 	 * We actually just want to wait for *any* trigger of xb_waitq,
811 	 * and run xenbus_probe() the moment it occurs.
812 	 */
813 	prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
814 	schedule();
815 	finish_wait(&xb_waitq, &w);
816 
817 	DPRINTK("probing");
818 	xenbus_probe();
819 	return 0;
820 }
821 
822 static int __init xenbus_probe_initcall(void)
823 {
824 	if (!xen_domain())
825 		return -ENODEV;
826 
827 	/*
	 * Probe XenBus here in the XS_PV case, and also in the XS_HVM case
	 * unless we need to wait for the platform PCI device to come up or
	 * xen_store_interface is not yet ready.
831 	 */
832 	if (xen_store_domain_type == XS_PV ||
833 	    (xen_store_domain_type == XS_HVM &&
834 	     !xs_hvm_defer_init_for_callback() &&
835 	     XS_INTERFACE_READY))
836 		xenbus_probe();
837 
838 	/*
839 	 * For XS_LOCAL or when xen_store_interface is not ready, spawn a
840 	 * thread which will wait for xenstored or a xenstore-stubdom to be
841 	 * started, then probe.  It will be triggered when communication
842 	 * starts happening, by waiting on xb_waitq.
843 	 */
844 	if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
845 		struct task_struct *probe_task;
846 
847 		probe_task = kthread_run(xenbus_probe_thread, NULL,
848 					 "xenbus_probe");
849 		if (IS_ERR(probe_task))
850 			return PTR_ERR(probe_task);
851 	}
852 	return 0;
853 }
854 device_initcall(xenbus_probe_initcall);
855 
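/*
 * Tell the hypervisor how event channel upcalls should be delivered to
 * this domain.  Once the callback mechanism is in place, run any
 * xenbus_probe() that was deferred waiting for it.
 */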
856 int xen_set_callback_via(uint64_t via)
857 {
858 	struct xen_hvm_param a;
859 	int ret;
860 
861 	a.domid = DOMID_SELF;
862 	a.index = HVM_PARAM_CALLBACK_IRQ;
863 	a.value = via;
864 
865 	ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
866 	if (ret)
867 		return ret;
868 
869 	/*
870 	 * If xenbus_probe_initcall() deferred the xenbus_probe()
871 	 * due to the callback not functioning yet, we can do it now.
872 	 */
873 	if (!xenstored_ready && xs_hvm_defer_init_for_callback())
874 		xenbus_probe();
875 
876 	return ret;
877 }
878 EXPORT_SYMBOL_GPL(xen_set_callback_via);
879 
/*
 * Set up the event channel for a xenstored that runs as a local process
 * (this is normally used only in dom0).
 */
883 static int __init xenstored_local_init(void)
884 {
885 	int err = -ENOMEM;
886 	unsigned long page = 0;
887 	struct evtchn_alloc_unbound alloc_unbound;
888 
889 	/* Allocate Xenstore page */
890 	page = get_zeroed_page(GFP_KERNEL);
891 	if (!page)
892 		goto out_err;
893 
894 	xen_store_gfn = virt_to_gfn((void *)page);
895 
896 	/* Next allocate a local port which xenstored can bind to */
897 	alloc_unbound.dom        = DOMID_SELF;
898 	alloc_unbound.remote_dom = DOMID_SELF;
899 
900 	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
901 					  &alloc_unbound);
902 	if (err == -ENOSYS)
903 		goto out_err;
904 
905 	BUG_ON(err);
906 	xen_store_evtchn = alloc_unbound.port;
907 
908 	return 0;
909 
910  out_err:
911 	if (page != 0)
912 		free_page(page);
913 	return err;
914 }
915 
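/*
 * On resume the store event channel may have changed (for example after
 * a migration), so re-read it from the hypervisor or from
 * xen_start_info.
 */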
916 static int xenbus_resume_cb(struct notifier_block *nb,
917 			    unsigned long action, void *data)
918 {
919 	int err = 0;
920 
921 	if (xen_hvm_domain()) {
922 		uint64_t v = 0;
923 
924 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
925 		if (!err && v)
926 			xen_store_evtchn = v;
927 		else
928 			pr_warn("Cannot update xenstore event channel: %d\n",
929 				err);
930 	} else
931 		xen_store_evtchn = xen_start_info->store_evtchn;
932 
933 	return err;
934 }
935 
936 static struct notifier_block xenbus_resume_nb = {
937 	.notifier_call = xenbus_resume_cb,
938 };
939 
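/*
 * Interrupt handler bound to the xenstore event channel while waiting
 * for a late-started xenstored: once HVM_PARAM_STORE_PFN holds a valid
 * frame number, record it and wake the probe thread.
 */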
940 static irqreturn_t xenbus_late_init(int irq, void *unused)
941 {
942 	int err;
943 	uint64_t v = 0;
944 
945 	err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
946 	if (err || !v || !~v)
947 		return IRQ_HANDLED;
948 	xen_store_gfn = (unsigned long)v;
949 
950 	wake_up(&xb_waitq);
951 	return IRQ_HANDLED;
952 }
953 
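/*
 * Work out where xenstore lives for this domain type, record its event
 * channel and ring frame, map the ring (or arrange to wait until
 * xenstored appears) and initialise xenstore communications (deferred
 * until xenbus_probe() in the HVM case).
 */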
954 static int __init xenbus_init(void)
955 {
956 	int err;
957 	uint64_t v = 0;
958 	bool wait = false;
959 	xen_store_domain_type = XS_UNKNOWN;
960 
961 	if (!xen_domain())
962 		return -ENODEV;
963 
964 	xenbus_ring_ops_init();
965 
966 	if (xen_pv_domain())
967 		xen_store_domain_type = XS_PV;
968 	if (xen_hvm_domain())
969 		xen_store_domain_type = XS_HVM;
970 	if (xen_hvm_domain() && xen_initial_domain())
971 		xen_store_domain_type = XS_LOCAL;
972 	if (xen_pv_domain() && !xen_start_info->store_evtchn)
973 		xen_store_domain_type = XS_LOCAL;
974 	if (xen_pv_domain() && xen_start_info->store_evtchn)
975 		xenstored_ready = 1;
976 
977 	switch (xen_store_domain_type) {
978 	case XS_LOCAL:
979 		err = xenstored_local_init();
980 		if (err)
981 			goto out_error;
982 		xen_store_interface = gfn_to_virt(xen_store_gfn);
983 		break;
984 	case XS_PV:
985 		xen_store_evtchn = xen_start_info->store_evtchn;
986 		xen_store_gfn = xen_start_info->store_mfn;
987 		xen_store_interface = gfn_to_virt(xen_store_gfn);
988 		break;
989 	case XS_HVM:
990 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
991 		if (err)
992 			goto out_error;
993 		xen_store_evtchn = (int)v;
994 		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
995 		if (err)
996 			goto out_error;
997 		/*
998 		 * Uninitialized hvm_params are zero and return no error.
999 		 * Although it is theoretically possible to have
1000 		 * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
1001 		 * not zero when valid. If zero, it means that Xenstore hasn't
1002 		 * been properly initialized. Instead of attempting to map a
1003 		 * wrong guest physical address return error.
1004 		 *
1005 		 * Also recognize all bits set as an invalid/uninitialized value.
1006 		 */
1007 		if (!v) {
1008 			err = -ENOENT;
1009 			goto out_error;
1010 		}
1011 		if (v == ~0ULL) {
1012 			wait = true;
1013 		} else {
1014 			/* Avoid truncation on 32-bit. */
1015 #if BITS_PER_LONG == 32
1016 			if (v > ULONG_MAX) {
1017 				pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
1018 				       __func__, v);
1019 				err = -EINVAL;
1020 				goto out_error;
1021 			}
1022 #endif
1023 			xen_store_gfn = (unsigned long)v;
1024 			xen_store_interface =
1025 				memremap(xen_store_gfn << XEN_PAGE_SHIFT,
1026 					 XEN_PAGE_SIZE, MEMREMAP_WB);
1027 			if (!xen_store_interface) {
1028 				pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
1029 				       __func__, v);
1030 				err = -EINVAL;
1031 				goto out_error;
1032 			}
1033 			if (xen_store_interface->connection != XENSTORE_CONNECTED)
1034 				wait = true;
1035 		}
1036 		if (wait) {
1037 			err = bind_evtchn_to_irqhandler(xen_store_evtchn,
1038 							xenbus_late_init,
1039 							0, "xenstore_late_init",
1040 							&xb_waitq);
1041 			if (err < 0) {
1042 				pr_err("xenstore_late_init couldn't bind irq err=%d\n",
1043 				       err);
1044 				goto out_error;
1045 			}
1046 
1047 			xs_init_irq = err;
1048 		}
1049 		break;
1050 	default:
1051 		pr_warn("Xenstore state unknown\n");
1052 		break;
1053 	}
1054 
1055 	/*
1056 	 * HVM domains may not have a functional callback yet. In that
1057 	 * case let xs_init() be called from xenbus_probe(), which will
1058 	 * get invoked at an appropriate time.
1059 	 */
1060 	if (xen_store_domain_type != XS_HVM) {
1061 		err = xs_init();
1062 		if (err) {
1063 			pr_warn("Error initializing xenstore comms: %i\n", err);
1064 			goto out_error;
1065 		}
1066 	}
1067 
1068 	if ((xen_store_domain_type != XS_LOCAL) &&
1069 	    (xen_store_domain_type != XS_UNKNOWN))
1070 		xen_resume_notifier_register(&xenbus_resume_nb);
1071 
1072 #ifdef CONFIG_XEN_COMPAT_XENFS
1073 	/*
1074 	 * Create xenfs mountpoint in /proc for compatibility with
1075 	 * utilities that expect to find "xenbus" under "/proc/xen".
1076 	 */
1077 	proc_create_mount_point("xen");
1078 #endif
1079 	return 0;
1080 
1081 out_error:
1082 	xen_store_domain_type = XS_UNKNOWN;
1083 	return err;
1084 }
1085 
1086 postcore_initcall(xenbus_init);
1087 
1088 MODULE_LICENSE("GPL");
1089