/******************************************************************************
 * Talks to Xen Store to figure out what devices we have.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
 * Copyright (C) 2005, 2006 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#define DPRINTK(fmt, args...)				\
	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
		 __func__, __LINE__, ##args)

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/xen-ops.h>
#include <xen/page.h>

#include <xen/hvm.h>

#include "xenbus.h"


int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);

struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);

enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);

static unsigned long xen_store_gfn;

static BLOCKING_NOTIFIER_HEAD(xenstore_chain);

/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
{
	for (; *arr->devicetype != '\0'; arr++) {
		if (!strcmp(arr->devicetype, dev->devicetype))
			return arr;
	}
	return NULL;
}

int xenbus_match(struct device *_dev, struct device_driver *_drv)
{
	struct xenbus_driver *drv = to_xenbus_driver(_drv);

	if (!drv->ids)
		return 0;

	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}
EXPORT_SYMBOL_GPL(xenbus_match);


static void free_otherend_details(struct xenbus_device *dev)
{
	kfree(dev->otherend);
	dev->otherend = NULL;
}


static void free_otherend_watch(struct xenbus_device *dev)
{
	if (dev->otherend_watch.node) {
		unregister_xenbus_watch(&dev->otherend_watch);
		kfree(dev->otherend_watch.node);
		dev->otherend_watch.node = NULL;
	}
}

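/*
 * Drop any stale watch and cached details of the other end, then ask the
 * driver to re-read them from xenstore.
 */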
static int talk_to_otherend(struct xenbus_device *dev)
{
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);

	free_otherend_watch(dev);
	free_otherend_details(dev);

	return drv->read_otherend_details(dev);
}


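/*
 * Watch the "state" node of the other end so the bus gets told about its
 * state transitions.
 */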
static int watch_otherend(struct xenbus_device *dev)
{
	struct xen_bus_type *bus =
		container_of(dev->dev.bus, struct xen_bus_type, bus);

	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
				    bus->otherend_will_handle,
				    bus->otherend_changed,
				    "%s/%s", dev->otherend, "state");
}


int xenbus_read_otherend_details(struct xenbus_device *xendev,
				 char *id_node, char *path_node)
{
	int err = xenbus_gather(XBT_NIL, xendev->nodename,
				id_node, "%i", &xendev->otherend_id,
				path_node, NULL, &xendev->otherend,
				NULL);
	if (err) {
		xenbus_dev_fatal(xendev, err,
				 "reading other end details from %s",
				 xendev->nodename);
		return err;
	}
	if (strlen(xendev->otherend) == 0 ||
	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
		xenbus_dev_fatal(xendev, -ENOENT,
				 "unable to read other end from %s.  "
				 "missing or inaccessible.",
				 xendev->nodename);
		free_otherend_details(xendev);
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);

void xenbus_otherend_changed(struct xenbus_watch *watch,
			     const char *path, const char *token,
			     int ignore_on_shutdown)
{
	struct xenbus_device *dev =
		container_of(watch, struct xenbus_device, otherend_watch);
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
	enum xenbus_state state;

	/*
	 * Protect us against watches firing on old details when the otherend
	 * details change, say immediately after a resume.
	 */
	if (!dev->otherend ||
	    strncmp(dev->otherend, path, strlen(dev->otherend))) {
		dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
		return;
	}

	state = xenbus_read_driver_state(dev->otherend);

	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
		state, xenbus_strstate(state), dev->otherend_watch.node, path);

	/*
	 * Ignore xenbus transitions during shutdown. This prevents us from
	 * doing work that can fail, e.g. when the rootfs is gone.
	 */
	if (system_state > SYSTEM_RUNNING) {
		if (ignore_on_shutdown && (state == XenbusStateClosing))
			xenbus_frontend_closed(dev);
		return;
	}

	if (drv->otherend_changed)
		drv->otherend_changed(dev, state);
}
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);

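/*
 * Per-device event channel statistics, exported through the "xenbus"
 * sysfs attribute group below each device.
 */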
#define XENBUS_SHOW_STAT(name)						\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	struct xenbus_device *dev = to_xenbus_device(_dev);		\
									\
	return sprintf(buf, "%d\n", atomic_read(&dev->name));		\
}									\
static DEVICE_ATTR_RO(name)

XENBUS_SHOW_STAT(event_channels);
XENBUS_SHOW_STAT(events);
XENBUS_SHOW_STAT(spurious_events);
XENBUS_SHOW_STAT(jiffies_eoi_delayed);

static ssize_t spurious_threshold_show(struct device *_dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);

	return sprintf(buf, "%d\n", dev->spurious_threshold);
}

static ssize_t spurious_threshold_store(struct device *_dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;

	dev->spurious_threshold = val;

	return count;
}

static DEVICE_ATTR_RW(spurious_threshold);

static struct attribute *xenbus_attrs[] = {
	&dev_attr_event_channels.attr,
	&dev_attr_events.attr,
	&dev_attr_spurious_events.attr,
	&dev_attr_jiffies_eoi_delayed.attr,
	&dev_attr_spurious_threshold.attr,
	NULL
};

static const struct attribute_group xenbus_group = {
	.name = "xenbus",
	.attrs = xenbus_attrs,
};

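/*
 * Bus probe hook: match the device against the driver's id table, read the
 * other end's details, run the driver's probe under reclaim_sem and only
 * then start watching the other end's state.
 */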
int xenbus_dev_probe(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
	const struct xenbus_device_id *id;
	int err;

	DPRINTK("%s", dev->nodename);

	if (!drv->probe) {
		err = -ENODEV;
		goto fail;
	}

	id = match_device(drv->ids, dev);
	if (!id) {
		err = -ENODEV;
		goto fail;
	}

	err = talk_to_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}

	if (!try_module_get(drv->driver.owner)) {
		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
			 drv->driver.name);
		err = -ESRCH;
		goto fail;
	}

	down(&dev->reclaim_sem);
	err = drv->probe(dev, id);
	up(&dev->reclaim_sem);
	if (err)
		goto fail_put;

	err = watch_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}

	dev->spurious_threshold = 1;
	if (sysfs_create_group(&dev->dev.kobj, &xenbus_group))
		dev_warn(&dev->dev, "sysfs_create_group on %s failed.\n",
			 dev->nodename);

	return 0;
fail_put:
	module_put(drv->driver.owner);
fail:
	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_dev_probe);

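/*
 * Bus remove hook: tear down in roughly the reverse order of probe and,
 * unless the driver supports re-bind, switch the device to Closed so the
 * toolstack can clean it up.
 */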
int xenbus_dev_remove(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);

	DPRINTK("%s", dev->nodename);

	sysfs_remove_group(&dev->dev.kobj, &xenbus_group);

	free_otherend_watch(dev);

	if (drv->remove) {
		down(&dev->reclaim_sem);
		drv->remove(dev);
		up(&dev->reclaim_sem);
	}

	module_put(drv->driver.owner);

	free_otherend_details(dev);

	/*
	 * If the toolstack has forced the device state to closing then set
	 * the state to closed now to allow it to be cleaned up.
	 * Similarly, if the driver does not support re-bind, set it to
	 * closed.
	 */
	if (!drv->allow_rebind ||
	    xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
		xenbus_switch_state(dev, XenbusStateClosed);

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_remove);

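/*
 * Common tail of driver registration.  Drivers do not normally call this
 * directly; frontend and backend drivers typically reach it through the
 * xenbus_register_frontend()/xenbus_register_backend() wrappers, which
 * supply the right bus type, module owner and name.
 */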
int xenbus_register_driver_common(struct xenbus_driver *drv,
				  struct xen_bus_type *bus,
				  struct module *owner, const char *mod_name)
{
	drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
	drv->driver.bus = &bus->bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_register_driver_common);

void xenbus_unregister_driver(struct xenbus_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);

struct xb_find_info {
	struct xenbus_device *dev;
	const char *nodename;
};

static int cmp_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;

	if (!strcmp(xendev->nodename, info->nodename)) {
		info->dev = xendev;
		get_device(dev);
		return 1;
	}
	return 0;
}

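/*
 * Return the device with the given nodename, or NULL.  A reference is held
 * on the returned device; the caller must drop it with put_device().
 */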
static struct xenbus_device *xenbus_device_find(const char *nodename,
						struct bus_type *bus)
{
	struct xb_find_info info = { .dev = NULL, .nodename = nodename };

	bus_for_each_dev(bus, NULL, &info, cmp_dev);
	return info.dev;
}

static int cleanup_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;
	int len = strlen(info->nodename);

	DPRINTK("%s", info->nodename);

	/* Match the info->nodename path, or any subdirectory of that path. */
	if (strncmp(xendev->nodename, info->nodename, len))
		return 0;

	/* If the node name is longer, ensure it really is a subdirectory. */
	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
		return 0;

	info->dev = xendev;
	get_device(dev);
	return 1;
}

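/*
 * Unregister every device whose node is @path or lies underneath it, e.g.
 * after the node has disappeared from xenstore.
 */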
static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
{
	struct xb_find_info info = { .nodename = path };

	do {
		info.dev = NULL;
		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
		if (info.dev) {
			device_unregister(&info.dev->dev);
			put_device(&info.dev->dev);
		}
	} while (info.dev);
}

static void xenbus_dev_release(struct device *dev)
{
	if (dev)
		kfree(to_xenbus_device(dev));
}

static ssize_t nodename_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
static DEVICE_ATTR_RO(nodename);

static ssize_t devtype_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(devtype);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s:%s\n", dev->bus->name,
		       to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t state_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n",
			xenbus_strstate(to_xenbus_device(dev)->state));
}
static DEVICE_ATTR_RO(state);

static struct attribute *xenbus_dev_attrs[] = {
	&dev_attr_nodename.attr,
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_state.attr,
	NULL,
};

static const struct attribute_group xenbus_dev_group = {
	.attrs = xenbus_dev_attrs,
};

const struct attribute_group *xenbus_dev_groups[] = {
	&xenbus_dev_group,
	NULL,
};
EXPORT_SYMBOL_GPL(xenbus_dev_groups);

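/*
 * Create and register a xenbus_device for @nodename of the given @type.
 * Only nodes still in XenbusStateInitialising are considered; the node and
 * type strings live in the extra space allocated behind the struct.
 */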
int xenbus_probe_node(struct xen_bus_type *bus,
		      const char *type,
		      const char *nodename)
{
	char devname[XEN_BUS_ID_SIZE];
	int err;
	struct xenbus_device *xendev;
	size_t stringlen;
	char *tmpstring;

	enum xenbus_state state = xenbus_read_driver_state(nodename);

	if (state != XenbusStateInitialising) {
		/*
		 * Device is not new, so ignore it.  This can happen if a
		 * device is going away after switching to Closed.
		 */
		return 0;
	}

	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
	if (!xendev)
		return -ENOMEM;

	xendev->state = XenbusStateInitialising;

	/* Copy the strings into the extra space. */

	tmpstring = (char *)(xendev + 1);
	strcpy(tmpstring, nodename);
	xendev->nodename = tmpstring;

	tmpstring += strlen(tmpstring) + 1;
	strcpy(tmpstring, type);
	xendev->devicetype = tmpstring;
	init_completion(&xendev->down);

	xendev->dev.bus = &bus->bus;
	xendev->dev.release = xenbus_dev_release;

	err = bus->get_bus_id(devname, xendev->nodename);
	if (err)
		goto fail;

	dev_set_name(&xendev->dev, "%s", devname);
	sema_init(&xendev->reclaim_sem, 1);

	/* Register with generic device framework. */
	err = device_register(&xendev->dev);
	if (err) {
		put_device(&xendev->dev);
		xendev = NULL;
		goto fail;
	}

	return 0;
fail:
	kfree(xendev);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_node);

static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
{
	int err = 0;
	char **dir;
	unsigned int dir_n = 0;
	int i;

	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = bus->probe(bus, type, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}

int xenbus_probe_devices(struct xen_bus_type *bus)
{
	int err = 0;
	char **dir;
	unsigned int i, dir_n;

	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = xenbus_probe_device_type(bus, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_devices);

static unsigned int char_count(const char *str, char c)
{
	unsigned int i, ret = 0;

	for (i = 0; str[i]; i++)
		if (str[i] == c)
			ret++;
	return ret;
}

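/*
 * Return the length of the longest prefix of @str containing exactly @len
 * occurrences of @c, or -ERANGE if @str contains fewer than @len of them.
 */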
static int strsep_len(const char *str, char c, unsigned int len)
{
	unsigned int i;

	for (i = 0; str[i]; i++)
		if (str[i] == c) {
			if (len == 0)
				return i;
			len--;
		}
	return (len == 0) ? i : -ERANGE;
}

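/*
 * Watch callback for the bus root: create a device for a newly appeared
 * <root>/<type>/<id> node, or clean up the devices under a node that has
 * gone away.
 */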
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
{
	int exists, rootlen;
	struct xenbus_device *dev;
	char type[XEN_BUS_ID_SIZE];
	const char *p, *root;

	if (char_count(node, '/') < 2)
		return;

	exists = xenbus_exists(XBT_NIL, node, "");
	if (!exists) {
		xenbus_cleanup_devices(node, &bus->bus);
		return;
	}

	/* backend/<type>/... or device/<type>/... */
	p = strchr(node, '/') + 1;
	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
	type[XEN_BUS_ID_SIZE-1] = '\0';

	rootlen = strsep_len(node, '/', bus->levels);
	if (rootlen < 0)
		return;
	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
	if (!root)
		return;

	dev = xenbus_device_find(root, &bus->bus);
	if (!dev)
		xenbus_probe_node(bus, type, root);
	else
		put_device(&dev->dev);

	kfree(root);
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);

int xenbus_dev_suspend(struct device *dev)
{
	int err = 0;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	if (drv->suspend)
		err = drv->suspend(xdev);
	if (err)
		dev_warn(dev, "suspend failed: %i\n", err);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_suspend);

int xenbus_dev_resume(struct device *dev)
{
	int err;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	err = talk_to_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err);
		return err;
	}

	xdev->state = XenbusStateInitialising;

	if (drv->resume) {
		err = drv->resume(xdev);
		if (err) {
			dev_warn(dev, "resume failed: %i\n", err);
			return err;
		}
	}

	err = watch_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (watch_otherend) failed: %d\n", err);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);

int xenbus_dev_cancel(struct device *dev)
{
	/* Do nothing */
	DPRINTK("cancel");
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_cancel);

/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready;


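/*
 * If xenstore is already up, the notifier is called straight away instead
 * of being added to the chain.
 */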
int register_xenstore_notifier(struct notifier_block *nb)
{
	int ret = 0;

	if (xenstored_ready > 0)
		ret = nb->notifier_call(nb, 0, NULL);
	else
		blocking_notifier_chain_register(&xenstore_chain, nb);

	return ret;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);

void unregister_xenstore_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&xenstore_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);

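/*
 * Xenstore has become usable: mark it ready, finish any deferred HVM comms
 * setup and tell the registered notifiers.
 */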
static void xenbus_probe(void)
{
	xenstored_ready = 1;

	/*
	 * In the HVM case, xenbus_init() deferred its call to
	 * xs_init() in case callbacks were not operational yet.
	 * So do it now.
	 */
	if (xen_store_domain_type == XS_HVM)
		xs_init();

	/* Notify others that xenstore is up */
	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}

/*
 * Returns true when XenStore init must be deferred in order to
 * allow the PCI platform device to be initialised, before we
 * can actually have event channel interrupts working.
 */
static bool xs_hvm_defer_init_for_callback(void)
{
#ifdef CONFIG_XEN_PVHVM
	return xen_store_domain_type == XS_HVM &&
		!xen_have_vector_callback;
#else
	return false;
#endif
}

static int xenbus_probe_thread(void *unused)
{
	DEFINE_WAIT(w);

	/*
	 * We actually just want to wait for *any* trigger of xb_waitq,
	 * and run xenbus_probe() the moment it occurs.
	 */
	prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
	schedule();
	finish_wait(&xb_waitq, &w);

	DPRINTK("probing");
	xenbus_probe();
	return 0;
}

static int __init xenbus_probe_initcall(void)
{
	/*
	 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
	 * need to wait for the platform PCI device to come up.
	 */
	if (xen_store_domain_type == XS_PV ||
	    (xen_store_domain_type == XS_HVM &&
	     !xs_hvm_defer_init_for_callback()))
		xenbus_probe();

	/*
	 * For XS_LOCAL, spawn a thread which will wait for xenstored
	 * or a xenstore-stubdom to be started, then probe. It will be
	 * triggered when communication starts happening, by waiting
	 * on xb_waitq.
	 */
	if (xen_store_domain_type == XS_LOCAL) {
		struct task_struct *probe_task;

		probe_task = kthread_run(xenbus_probe_thread, NULL,
					 "xenbus_probe");
		if (IS_ERR(probe_task))
			return PTR_ERR(probe_task);
	}
	return 0;
}
device_initcall(xenbus_probe_initcall);

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	int ret;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;

	ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
	if (ret)
		return ret;

	/*
	 * If xenbus_probe_initcall() deferred the xenbus_probe()
	 * due to the callback not functioning yet, we can do it now.
	 */
	if (!xenstored_ready && xs_hvm_defer_init_for_callback())
		xenbus_probe();

	return ret;
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

/*
 * Set up the event channel for xenstored, which is run as a local process
 * (this is normally used only in dom0).
 */
static int __init xenstored_local_init(void)
{
	int err = -ENOMEM;
	unsigned long page = 0;
	struct evtchn_alloc_unbound alloc_unbound;

	/* Allocate Xenstore page */
	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		goto out_err;

	xen_store_gfn = virt_to_gfn((void *)page);

	/* Next allocate a local port which xenstored can bind to */
	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = DOMID_SELF;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err == -ENOSYS)
		goto out_err;

	BUG_ON(err);
	xen_store_evtchn = alloc_unbound.port;

	return 0;

 out_err:
	if (page != 0)
		free_page(page);
	return err;
}

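/*
 * On resume the store event channel may have changed (at least in the HVM
 * case), so re-read it before communication restarts.
 */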
static int xenbus_resume_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	int err = 0;

	if (xen_hvm_domain()) {
		uint64_t v = 0;

		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (!err && v)
			xen_store_evtchn = v;
		else
			pr_warn("Cannot update xenstore event channel: %d\n",
				err);
	} else
		xen_store_evtchn = xen_start_info->store_evtchn;

	return err;
}

static struct notifier_block xenbus_resume_nb = {
	.notifier_call = xenbus_resume_cb,
};

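/*
 * Work out how this domain reaches xenstore (local daemon, PV start info
 * or HVM parameters), locate the ring page and bring up the comms layer.
 */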
static int __init xenbus_init(void)
{
	int err = 0;
	uint64_t v = 0;

	xen_store_domain_type = XS_UNKNOWN;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	if (xen_pv_domain())
		xen_store_domain_type = XS_PV;
	if (xen_hvm_domain())
		xen_store_domain_type = XS_HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (xen_store_domain_type) {
	case XS_LOCAL:
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_PV:
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_gfn = xen_start_info->store_mfn;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_HVM:
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		xen_store_gfn = (unsigned long)v;
		xen_store_interface =
			xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
				  XEN_PAGE_SIZE);
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/*
	 * HVM domains may not have a functional callback yet. In that
	 * case let xs_init() be called from xenbus_probe(), which will
	 * get invoked at an appropriate time.
	 */
	if (xen_store_domain_type != XS_HVM) {
		err = xs_init();
		if (err) {
			pr_warn("Error initializing xenstore comms: %i\n", err);
			goto out_error;
		}
	}

	if ((xen_store_domain_type != XS_LOCAL) &&
	    (xen_store_domain_type != XS_UNKNOWN))
		xen_resume_notifier_register(&xenbus_resume_nb);

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_create_mount_point("xen");
#endif

out_error:
	return err;
}

postcore_initcall(xenbus_init);

MODULE_LICENSE("GPL");