/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

*/

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

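/*
 * Per-device backend state.  One of these is allocated in xen_blkbk_probe()
 * and attached to the xenbus device via dev_set_drvdata(); backend_watch
 * fires when the hotplug scripts write the "physical-device" node.
 */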
struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
			    unsigned int);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname  = devpath;

	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

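/*
 * Try to bring the backend to the Connected state.  Safe to call whenever a
 * precondition may have just been satisfied: it bails out unless both the
 * ring/event channel are mapped (blkif->irq) and the backing device is open
 * (blkif->vbd.bdev), and once connected it starts the per-device xenblkd
 * thread (xen_blkif_schedule()) that services the ring.
 */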
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];

	/* Not ready to connect? */
	if (!blkif->irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name);
	if (IS_ERR(blkif->xenblkd)) {
		err = PTR_ERR(blkif->xenblkd);
		blkif->xenblkd = NULL;
		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
	}
}

static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	spin_lock_init(&blkif->blk_ring_lock);
	atomic_set(&blkif->refcnt, 1);
	init_waitqueue_head(&blkif->wq);
	blkif->st_print = jiffies;
	init_waitqueue_head(&blkif->waiting_to_free);

	return blkif;
}

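/*
 * Map the frontend's shared ring page (identified by the grant reference
 * shared_page) into the backend's address space at blk_ring_area->addr.
 * The grant reference and handle are stashed so the page can be unmapped
 * again on disconnect.
 */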
static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			  GNTMAP_host_map, shared_page, blkif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		DPRINTK("Grant table operation failure!\n");
		return op.status;
	}

	blkif->shmem_ref = shared_page;
	blkif->shmem_handle = op.handle;

	return 0;
}

static void unmap_frontend_page(struct xen_blkif *blkif)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			    GNTMAP_host_map, blkif->shmem_handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();
}

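/*
 * Connect the ring and event channel handed to us by the frontend: map the
 * shared page, initialise the back ring in whichever ABI the frontend
 * advertised (native, x86_32 or x86_64), and bind the interdomain event
 * channel to xen_blkif_be_int().
 */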
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected? */
	if (blkif->irq)
		return 0;

	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
	if (!blkif->blk_ring_area)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}

static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

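	/*
	 * Drop our own reference and wait for any remaining holders (for
	 * example in-flight requests elsewhere in the driver that took a
	 * reference) to finish, then take the reference back so that the
	 * final xen_blkif_free() can still do atomic_dec_and_test().
	 */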
	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
	}
}

void xen_blkif_free(struct xen_blkif *blkif)
{
	if (!atomic_dec_and_test(&blkif->refcnt))
		BUG();
	kmem_cache_free(xen_blkif_cachep, blkif);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

/*
 *  sysfs interface for VBD I/O requests
 */

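/*
 * VBD_SHOW(name, format, args...) expands to a sysfs show_##name() routine
 * plus a read-only DEVICE_ATTR.  The format arguments are evaluated inside
 * the generated function, where "be" is the backend_info fetched from the
 * xenbus device's drvdata, so the statistics below read straight out of
 * be->blkif.
 */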
#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail2;

	return 0;

fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

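/*
 * Open the physical device that the hotplug scripts pointed us at (the
 * major:minor from the "physical-device" node) and record its properties:
 * size, CD-ROM/removable flags for the "info" node, and whether the queue
 * supports flushes so that "feature-flush-cache" can be advertised in
 * connect().
 */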
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if ((vbd->bdev->bd_disk->flags & GENHD_FL_CD) || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

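	/*
	 * A non-zero q->flush_flags indicates the underlying queue accepts
	 * cache-flush requests; remember that so connect() can advertise
	 * "feature-flush-cache" to the frontend.
	 */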
	q = bdev_get_queue(bdev);
	if (q && q->flush_flags)
		vbd->flush_support = true;

	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}

static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	DPRINTK("");

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);
		xen_vbd_free(&be->blkif->vbd);
		xen_blkif_free(be->blkif);
		be->blkif = NULL;
	}

	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-flush-cache");

	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	/* Set up the back pointer. */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	DPRINTK("failed");
	xen_blkbk_remove(dev);
	return err;
}


/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	char *device_type;

	DPRINTK("");

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if ((be->major || be->minor) &&
	    ((be->major != major) || (be->minor != minor))) {
		pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
			be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	if (be->major == 0 && be->minor == 0) {
		/* Front end dir is a number, which is used as the handle. */

		char *p = strrchr(dev->otherend, '/') + 1;
		unsigned long handle;

		err = strict_strtoul(p, 0, &handle);
		if (err)
			return;

		be->major = major;
		be->minor = minor;

		err = xen_vbd_create(be->blkif, handle, major, minor,
				     (NULL == strchr(be->mode, 'w')), cdrom);
		if (err) {
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating vbd structure");
			return;
		}

		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
			return;
		}

		/* We're potentially connected now. */
		xen_update_blkif_status(be->blkif);
	}
}


/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	DPRINTK("%s", xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info(DRV_PFX "%s: prepare for reconnect\n",
				dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		xen_blkif_disconnect(be->blkif);

		err = connect_ring(be);
		if (err)
			break;
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* Implies xen_blkif_disconnect() via xen_blkbk_remove(). */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	DPRINTK("%s", dev->otherend);

	/* Supply the information about the device the frontend needs. */
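	/*
	 * Everything below runs inside a xenbus transaction so the frontend
	 * sees the nodes appear atomically; xenbus_transaction_end() returns
	 * -EAGAIN if the transaction raced with another xenstore update, in
	 * which case we simply retry from "again".
	 */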
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
	if (err)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	char protocol[64] = "";
	int err;

	DPRINTK("%s", dev->otherend);

	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

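	/*
	 * Negotiate the ring ABI.  If the frontend does not write a
	 * "protocol" node we fall back to the native layout; otherwise it
	 * must name one of the three ABIs we can speak.
	 */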
	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
			    "%63s", protocol, NULL);
	if (err)
		strcpy(protocol, "unspecified, assuming native");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, -ENOSYS, "unknown fe protocol %s",
				 protocol);
		return -ENOSYS;
	}
	pr_info(DRV_PFX "ring-ref %lu, event-channel %u, protocol %d (%s)\n",
		ring_ref, evtchn, be->blkif->blk_protocol, protocol);

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(be->blkif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
				 ring_ref, evtchn);
		return err;
	}

	return 0;
}

/* ** Driver Registration ** */

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk);
}