1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* Xenbus code for blkif backend 3 Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> 4 Copyright (C) 2005 XenSource Ltd 5 6 7 */ 8 9 #define pr_fmt(fmt) "xen-blkback: " fmt 10 11 #include <stdarg.h> 12 #include <linux/module.h> 13 #include <linux/kthread.h> 14 #include <xen/events.h> 15 #include <xen/grant_table.h> 16 #include "common.h" 17 18 /* On the XenBus the max length of 'ring-ref%u'. */ 19 #define RINGREF_NAME_LEN (20) 20 21 struct backend_info { 22 struct xenbus_device *dev; 23 struct xen_blkif *blkif; 24 struct xenbus_watch backend_watch; 25 unsigned major; 26 unsigned minor; 27 char *mode; 28 }; 29 30 static struct kmem_cache *xen_blkif_cachep; 31 static void connect(struct backend_info *); 32 static int connect_ring(struct backend_info *); 33 static void backend_changed(struct xenbus_watch *, const char *, 34 const char *); 35 static void xen_blkif_free(struct xen_blkif *blkif); 36 static void xen_vbd_free(struct xen_vbd *vbd); 37 38 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be) 39 { 40 return be->dev; 41 } 42 43 /* 44 * The last request could free the device from softirq context and 45 * xen_blkif_free() can sleep. 46 */ 47 static void xen_blkif_deferred_free(struct work_struct *work) 48 { 49 struct xen_blkif *blkif; 50 51 blkif = container_of(work, struct xen_blkif, free_work); 52 xen_blkif_free(blkif); 53 } 54 55 static int blkback_name(struct xen_blkif *blkif, char *buf) 56 { 57 char *devpath, *devname; 58 struct xenbus_device *dev = blkif->be->dev; 59 60 devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL); 61 if (IS_ERR(devpath)) 62 return PTR_ERR(devpath); 63 64 devname = strstr(devpath, "/dev/"); 65 if (devname != NULL) 66 devname += strlen("/dev/"); 67 else 68 devname = devpath; 69 70 snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname); 71 kfree(devpath); 72 73 return 0; 74 } 75 76 static void xen_update_blkif_status(struct xen_blkif *blkif) 77 { 78 int err; 79 char name[TASK_COMM_LEN]; 80 struct xen_blkif_ring *ring; 81 int i; 82 83 /* Not ready to connect? */ 84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev) 85 return; 86 87 /* Already connected? */ 88 if (blkif->be->dev->state == XenbusStateConnected) 89 return; 90 91 /* Attempt to connect: exit if we fail to. */ 92 connect(blkif->be); 93 if (blkif->be->dev->state != XenbusStateConnected) 94 return; 95 96 err = blkback_name(blkif, name); 97 if (err) { 98 xenbus_dev_error(blkif->be->dev, err, "get blkback dev name"); 99 return; 100 } 101 102 err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping); 103 if (err) { 104 xenbus_dev_error(blkif->be->dev, err, "block flush"); 105 return; 106 } 107 invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping); 108 109 for (i = 0; i < blkif->nr_rings; i++) { 110 ring = &blkif->rings[i]; 111 ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i); 112 if (IS_ERR(ring->xenblkd)) { 113 err = PTR_ERR(ring->xenblkd); 114 ring->xenblkd = NULL; 115 xenbus_dev_fatal(blkif->be->dev, err, 116 "start %s-%d xenblkd", name, i); 117 goto out; 118 } 119 } 120 return; 121 122 out: 123 while (--i >= 0) { 124 ring = &blkif->rings[i]; 125 kthread_stop(ring->xenblkd); 126 } 127 return; 128 } 129 130 static int xen_blkif_alloc_rings(struct xen_blkif *blkif) 131 { 132 unsigned int r; 133 134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring), 135 GFP_KERNEL); 136 if (!blkif->rings) 137 return -ENOMEM; 138 139 for (r = 0; r < blkif->nr_rings; r++) { 140 struct xen_blkif_ring *ring = &blkif->rings[r]; 141 142 spin_lock_init(&ring->blk_ring_lock); 143 init_waitqueue_head(&ring->wq); 144 INIT_LIST_HEAD(&ring->pending_free); 145 INIT_LIST_HEAD(&ring->persistent_purge_list); 146 INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants); 147 spin_lock_init(&ring->free_pages_lock); 148 INIT_LIST_HEAD(&ring->free_pages); 149 150 spin_lock_init(&ring->pending_free_lock); 151 init_waitqueue_head(&ring->pending_free_wq); 152 init_waitqueue_head(&ring->shutdown_wq); 153 ring->blkif = blkif; 154 ring->st_print = jiffies; 155 ring->active = true; 156 } 157 158 return 0; 159 } 160 161 static struct xen_blkif *xen_blkif_alloc(domid_t domid) 162 { 163 struct xen_blkif *blkif; 164 165 BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); 166 167 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); 168 if (!blkif) 169 return ERR_PTR(-ENOMEM); 170 171 blkif->domid = domid; 172 atomic_set(&blkif->refcnt, 1); 173 init_completion(&blkif->drain_complete); 174 175 /* 176 * Because freeing back to the cache may be deferred, it is not 177 * safe to unload the module (and hence destroy the cache) until 178 * this has completed. To prevent premature unloading, take an 179 * extra module reference here and release only when the object 180 * has been freed back to the cache. 181 */ 182 __module_get(THIS_MODULE); 183 INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); 184 185 return blkif; 186 } 187 188 static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, 189 unsigned int nr_grefs, unsigned int evtchn) 190 { 191 int err; 192 struct xen_blkif *blkif = ring->blkif; 193 const struct blkif_common_sring *sring_common; 194 RING_IDX rsp_prod, req_prod; 195 unsigned int size; 196 197 /* Already connected through? */ 198 if (ring->irq) 199 return 0; 200 201 err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs, 202 &ring->blk_ring); 203 if (err < 0) 204 return err; 205 206 sring_common = (struct blkif_common_sring *)ring->blk_ring; 207 rsp_prod = READ_ONCE(sring_common->rsp_prod); 208 req_prod = READ_ONCE(sring_common->req_prod); 209 210 switch (blkif->blk_protocol) { 211 case BLKIF_PROTOCOL_NATIVE: 212 { 213 struct blkif_sring *sring_native = 214 (struct blkif_sring *)ring->blk_ring; 215 216 BACK_RING_ATTACH(&ring->blk_rings.native, sring_native, 217 rsp_prod, XEN_PAGE_SIZE * nr_grefs); 218 size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs); 219 break; 220 } 221 case BLKIF_PROTOCOL_X86_32: 222 { 223 struct blkif_x86_32_sring *sring_x86_32 = 224 (struct blkif_x86_32_sring *)ring->blk_ring; 225 226 BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32, 227 rsp_prod, XEN_PAGE_SIZE * nr_grefs); 228 size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs); 229 break; 230 } 231 case BLKIF_PROTOCOL_X86_64: 232 { 233 struct blkif_x86_64_sring *sring_x86_64 = 234 (struct blkif_x86_64_sring *)ring->blk_ring; 235 236 BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64, 237 rsp_prod, XEN_PAGE_SIZE * nr_grefs); 238 size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs); 239 break; 240 } 241 default: 242 BUG(); 243 } 244 245 err = -EIO; 246 if (req_prod - rsp_prod > size) 247 goto fail; 248 249 err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, 250 xen_blkif_be_int, 0, 251 "blkif-backend", ring); 252 if (err < 0) 253 goto fail; 254 ring->irq = err; 255 256 return 0; 257 258 fail: 259 xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); 260 ring->blk_rings.common.sring = NULL; 261 return err; 262 } 263 264 static int xen_blkif_disconnect(struct xen_blkif *blkif) 265 { 266 struct pending_req *req, *n; 267 unsigned int j, r; 268 bool busy = false; 269 270 for (r = 0; r < blkif->nr_rings; r++) { 271 struct xen_blkif_ring *ring = &blkif->rings[r]; 272 unsigned int i = 0; 273 274 if (!ring->active) 275 continue; 276 277 if (ring->xenblkd) { 278 kthread_stop(ring->xenblkd); 279 wake_up(&ring->shutdown_wq); 280 } 281 282 /* The above kthread_stop() guarantees that at this point we 283 * don't have any discard_io or other_io requests. So, checking 284 * for inflight IO is enough. 285 */ 286 if (atomic_read(&ring->inflight) > 0) { 287 busy = true; 288 continue; 289 } 290 291 if (ring->irq) { 292 unbind_from_irqhandler(ring->irq, ring); 293 ring->irq = 0; 294 } 295 296 if (ring->blk_rings.common.sring) { 297 xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); 298 ring->blk_rings.common.sring = NULL; 299 } 300 301 /* Remove all persistent grants and the cache of ballooned pages. */ 302 xen_blkbk_free_caches(ring); 303 304 /* Check that there is no request in use */ 305 list_for_each_entry_safe(req, n, &ring->pending_free, free_list) { 306 list_del(&req->free_list); 307 308 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) 309 kfree(req->segments[j]); 310 311 for (j = 0; j < MAX_INDIRECT_PAGES; j++) 312 kfree(req->indirect_pages[j]); 313 314 kfree(req); 315 i++; 316 } 317 318 BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0); 319 BUG_ON(!list_empty(&ring->persistent_purge_list)); 320 BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts)); 321 BUG_ON(!list_empty(&ring->free_pages)); 322 BUG_ON(ring->free_pages_num != 0); 323 BUG_ON(ring->persistent_gnt_c != 0); 324 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); 325 ring->active = false; 326 } 327 if (busy) 328 return -EBUSY; 329 330 blkif->nr_ring_pages = 0; 331 /* 332 * blkif->rings was allocated in connect_ring, so we should free it in 333 * here. 334 */ 335 kfree(blkif->rings); 336 blkif->rings = NULL; 337 blkif->nr_rings = 0; 338 339 return 0; 340 } 341 342 static void xen_blkif_free(struct xen_blkif *blkif) 343 { 344 WARN_ON(xen_blkif_disconnect(blkif)); 345 xen_vbd_free(&blkif->vbd); 346 kfree(blkif->be->mode); 347 kfree(blkif->be); 348 349 /* Make sure everything is drained before shutting down */ 350 kmem_cache_free(xen_blkif_cachep, blkif); 351 module_put(THIS_MODULE); 352 } 353 354 int __init xen_blkif_interface_init(void) 355 { 356 xen_blkif_cachep = kmem_cache_create("blkif_cache", 357 sizeof(struct xen_blkif), 358 0, 0, NULL); 359 if (!xen_blkif_cachep) 360 return -ENOMEM; 361 362 return 0; 363 } 364 365 void xen_blkif_interface_fini(void) 366 { 367 kmem_cache_destroy(xen_blkif_cachep); 368 xen_blkif_cachep = NULL; 369 } 370 371 /* 372 * sysfs interface for VBD I/O requests 373 */ 374 375 #define VBD_SHOW_ALLRING(name, format) \ 376 static ssize_t show_##name(struct device *_dev, \ 377 struct device_attribute *attr, \ 378 char *buf) \ 379 { \ 380 struct xenbus_device *dev = to_xenbus_device(_dev); \ 381 struct backend_info *be = dev_get_drvdata(&dev->dev); \ 382 struct xen_blkif *blkif = be->blkif; \ 383 unsigned int i; \ 384 unsigned long long result = 0; \ 385 \ 386 if (!blkif->rings) \ 387 goto out; \ 388 \ 389 for (i = 0; i < blkif->nr_rings; i++) { \ 390 struct xen_blkif_ring *ring = &blkif->rings[i]; \ 391 \ 392 result += ring->st_##name; \ 393 } \ 394 \ 395 out: \ 396 return sprintf(buf, format, result); \ 397 } \ 398 static DEVICE_ATTR(name, 0444, show_##name, NULL) 399 400 VBD_SHOW_ALLRING(oo_req, "%llu\n"); 401 VBD_SHOW_ALLRING(rd_req, "%llu\n"); 402 VBD_SHOW_ALLRING(wr_req, "%llu\n"); 403 VBD_SHOW_ALLRING(f_req, "%llu\n"); 404 VBD_SHOW_ALLRING(ds_req, "%llu\n"); 405 VBD_SHOW_ALLRING(rd_sect, "%llu\n"); 406 VBD_SHOW_ALLRING(wr_sect, "%llu\n"); 407 408 static struct attribute *xen_vbdstat_attrs[] = { 409 &dev_attr_oo_req.attr, 410 &dev_attr_rd_req.attr, 411 &dev_attr_wr_req.attr, 412 &dev_attr_f_req.attr, 413 &dev_attr_ds_req.attr, 414 &dev_attr_rd_sect.attr, 415 &dev_attr_wr_sect.attr, 416 NULL 417 }; 418 419 static const struct attribute_group xen_vbdstat_group = { 420 .name = "statistics", 421 .attrs = xen_vbdstat_attrs, 422 }; 423 424 #define VBD_SHOW(name, format, args...) \ 425 static ssize_t show_##name(struct device *_dev, \ 426 struct device_attribute *attr, \ 427 char *buf) \ 428 { \ 429 struct xenbus_device *dev = to_xenbus_device(_dev); \ 430 struct backend_info *be = dev_get_drvdata(&dev->dev); \ 431 \ 432 return sprintf(buf, format, ##args); \ 433 } \ 434 static DEVICE_ATTR(name, 0444, show_##name, NULL) 435 436 VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor); 437 VBD_SHOW(mode, "%s\n", be->mode); 438 439 static int xenvbd_sysfs_addif(struct xenbus_device *dev) 440 { 441 int error; 442 443 error = device_create_file(&dev->dev, &dev_attr_physical_device); 444 if (error) 445 goto fail1; 446 447 error = device_create_file(&dev->dev, &dev_attr_mode); 448 if (error) 449 goto fail2; 450 451 error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group); 452 if (error) 453 goto fail3; 454 455 return 0; 456 457 fail3: sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group); 458 fail2: device_remove_file(&dev->dev, &dev_attr_mode); 459 fail1: device_remove_file(&dev->dev, &dev_attr_physical_device); 460 return error; 461 } 462 463 static void xenvbd_sysfs_delif(struct xenbus_device *dev) 464 { 465 sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group); 466 device_remove_file(&dev->dev, &dev_attr_mode); 467 device_remove_file(&dev->dev, &dev_attr_physical_device); 468 } 469 470 471 static void xen_vbd_free(struct xen_vbd *vbd) 472 { 473 if (vbd->bdev) 474 blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE); 475 vbd->bdev = NULL; 476 } 477 478 static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, 479 unsigned major, unsigned minor, int readonly, 480 int cdrom) 481 { 482 struct xen_vbd *vbd; 483 struct block_device *bdev; 484 struct request_queue *q; 485 486 vbd = &blkif->vbd; 487 vbd->handle = handle; 488 vbd->readonly = readonly; 489 vbd->type = 0; 490 491 vbd->pdevice = MKDEV(major, minor); 492 493 bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ? 494 FMODE_READ : FMODE_WRITE, NULL); 495 496 if (IS_ERR(bdev)) { 497 pr_warn("xen_vbd_create: device %08x could not be opened\n", 498 vbd->pdevice); 499 return -ENOENT; 500 } 501 502 vbd->bdev = bdev; 503 if (vbd->bdev->bd_disk == NULL) { 504 pr_warn("xen_vbd_create: device %08x doesn't exist\n", 505 vbd->pdevice); 506 xen_vbd_free(vbd); 507 return -ENOENT; 508 } 509 vbd->size = vbd_sz(vbd); 510 511 if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom) 512 vbd->type |= VDISK_CDROM; 513 if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE) 514 vbd->type |= VDISK_REMOVABLE; 515 516 q = bdev_get_queue(bdev); 517 if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 518 vbd->flush_support = true; 519 520 if (q && blk_queue_secure_erase(q)) 521 vbd->discard_secure = true; 522 523 pr_debug("Successful creation of handle=%04x (dom=%u)\n", 524 handle, blkif->domid); 525 return 0; 526 } 527 static int xen_blkbk_remove(struct xenbus_device *dev) 528 { 529 struct backend_info *be = dev_get_drvdata(&dev->dev); 530 531 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id); 532 533 if (be->major || be->minor) 534 xenvbd_sysfs_delif(dev); 535 536 if (be->backend_watch.node) { 537 unregister_xenbus_watch(&be->backend_watch); 538 kfree(be->backend_watch.node); 539 be->backend_watch.node = NULL; 540 } 541 542 dev_set_drvdata(&dev->dev, NULL); 543 544 if (be->blkif) { 545 xen_blkif_disconnect(be->blkif); 546 547 /* Put the reference we set in xen_blkif_alloc(). */ 548 xen_blkif_put(be->blkif); 549 } 550 551 return 0; 552 } 553 554 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, 555 struct backend_info *be, int state) 556 { 557 struct xenbus_device *dev = be->dev; 558 int err; 559 560 err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache", 561 "%d", state); 562 if (err) 563 dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err); 564 565 return err; 566 } 567 568 static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be) 569 { 570 struct xenbus_device *dev = be->dev; 571 struct xen_blkif *blkif = be->blkif; 572 int err; 573 int state = 0; 574 struct block_device *bdev = be->blkif->vbd.bdev; 575 struct request_queue *q = bdev_get_queue(bdev); 576 577 if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1)) 578 return; 579 580 if (blk_queue_discard(q)) { 581 err = xenbus_printf(xbt, dev->nodename, 582 "discard-granularity", "%u", 583 q->limits.discard_granularity); 584 if (err) { 585 dev_warn(&dev->dev, "writing discard-granularity (%d)", err); 586 return; 587 } 588 err = xenbus_printf(xbt, dev->nodename, 589 "discard-alignment", "%u", 590 q->limits.discard_alignment); 591 if (err) { 592 dev_warn(&dev->dev, "writing discard-alignment (%d)", err); 593 return; 594 } 595 state = 1; 596 /* Optional. */ 597 err = xenbus_printf(xbt, dev->nodename, 598 "discard-secure", "%d", 599 blkif->vbd.discard_secure); 600 if (err) { 601 dev_warn(&dev->dev, "writing discard-secure (%d)", err); 602 return; 603 } 604 } 605 err = xenbus_printf(xbt, dev->nodename, "feature-discard", 606 "%d", state); 607 if (err) 608 dev_warn(&dev->dev, "writing feature-discard (%d)", err); 609 } 610 int xen_blkbk_barrier(struct xenbus_transaction xbt, 611 struct backend_info *be, int state) 612 { 613 struct xenbus_device *dev = be->dev; 614 int err; 615 616 err = xenbus_printf(xbt, dev->nodename, "feature-barrier", 617 "%d", state); 618 if (err) 619 dev_warn(&dev->dev, "writing feature-barrier (%d)", err); 620 621 return err; 622 } 623 624 /* 625 * Entry point to this code when a new device is created. Allocate the basic 626 * structures, and watch the store waiting for the hotplug scripts to tell us 627 * the device's physical major and minor numbers. Switch to InitWait. 628 */ 629 static int xen_blkbk_probe(struct xenbus_device *dev, 630 const struct xenbus_device_id *id) 631 { 632 int err; 633 struct backend_info *be = kzalloc(sizeof(struct backend_info), 634 GFP_KERNEL); 635 636 /* match the pr_debug in xen_blkbk_remove */ 637 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id); 638 639 if (!be) { 640 xenbus_dev_fatal(dev, -ENOMEM, 641 "allocating backend structure"); 642 return -ENOMEM; 643 } 644 be->dev = dev; 645 dev_set_drvdata(&dev->dev, be); 646 647 be->blkif = xen_blkif_alloc(dev->otherend_id); 648 if (IS_ERR(be->blkif)) { 649 err = PTR_ERR(be->blkif); 650 be->blkif = NULL; 651 xenbus_dev_fatal(dev, err, "creating block interface"); 652 goto fail; 653 } 654 655 err = xenbus_printf(XBT_NIL, dev->nodename, 656 "feature-max-indirect-segments", "%u", 657 MAX_INDIRECT_SEGMENTS); 658 if (err) 659 dev_warn(&dev->dev, 660 "writing %s/feature-max-indirect-segments (%d)", 661 dev->nodename, err); 662 663 /* Multi-queue: advertise how many queues are supported by us.*/ 664 err = xenbus_printf(XBT_NIL, dev->nodename, 665 "multi-queue-max-queues", "%u", xenblk_max_queues); 666 if (err) 667 pr_warn("Error writing multi-queue-max-queues\n"); 668 669 /* setup back pointer */ 670 be->blkif->be = be; 671 672 err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed, 673 "%s/%s", dev->nodename, "physical-device"); 674 if (err) 675 goto fail; 676 677 err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u", 678 xen_blkif_max_ring_order); 679 if (err) 680 pr_warn("%s write out 'max-ring-page-order' failed\n", __func__); 681 682 err = xenbus_switch_state(dev, XenbusStateInitWait); 683 if (err) 684 goto fail; 685 686 return 0; 687 688 fail: 689 pr_warn("%s failed\n", __func__); 690 xen_blkbk_remove(dev); 691 return err; 692 } 693 694 695 /* 696 * Callback received when the hotplug scripts have placed the physical-device 697 * node. Read it and the mode node, and create a vbd. If the frontend is 698 * ready, connect. 699 */ 700 static void backend_changed(struct xenbus_watch *watch, 701 const char *path, const char *token) 702 { 703 int err; 704 unsigned major; 705 unsigned minor; 706 struct backend_info *be 707 = container_of(watch, struct backend_info, backend_watch); 708 struct xenbus_device *dev = be->dev; 709 int cdrom = 0; 710 unsigned long handle; 711 char *device_type; 712 713 pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id); 714 715 err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x", 716 &major, &minor); 717 if (XENBUS_EXIST_ERR(err)) { 718 /* 719 * Since this watch will fire once immediately after it is 720 * registered, we expect this. Ignore it, and wait for the 721 * hotplug scripts. 722 */ 723 return; 724 } 725 if (err != 2) { 726 xenbus_dev_fatal(dev, err, "reading physical-device"); 727 return; 728 } 729 730 if (be->major | be->minor) { 731 if (be->major != major || be->minor != minor) 732 pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n", 733 be->major, be->minor, major, minor); 734 return; 735 } 736 737 be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL); 738 if (IS_ERR(be->mode)) { 739 err = PTR_ERR(be->mode); 740 be->mode = NULL; 741 xenbus_dev_fatal(dev, err, "reading mode"); 742 return; 743 } 744 745 device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL); 746 if (!IS_ERR(device_type)) { 747 cdrom = strcmp(device_type, "cdrom") == 0; 748 kfree(device_type); 749 } 750 751 /* Front end dir is a number, which is used as the handle. */ 752 err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); 753 if (err) { 754 kfree(be->mode); 755 be->mode = NULL; 756 return; 757 } 758 759 be->major = major; 760 be->minor = minor; 761 762 err = xen_vbd_create(be->blkif, handle, major, minor, 763 !strchr(be->mode, 'w'), cdrom); 764 765 if (err) 766 xenbus_dev_fatal(dev, err, "creating vbd structure"); 767 else { 768 err = xenvbd_sysfs_addif(dev); 769 if (err) { 770 xen_vbd_free(&be->blkif->vbd); 771 xenbus_dev_fatal(dev, err, "creating sysfs entries"); 772 } 773 } 774 775 if (err) { 776 kfree(be->mode); 777 be->mode = NULL; 778 be->major = 0; 779 be->minor = 0; 780 } else { 781 /* We're potentially connected now */ 782 xen_update_blkif_status(be->blkif); 783 } 784 } 785 786 787 /* 788 * Callback received when the frontend's state changes. 789 */ 790 static void frontend_changed(struct xenbus_device *dev, 791 enum xenbus_state frontend_state) 792 { 793 struct backend_info *be = dev_get_drvdata(&dev->dev); 794 int err; 795 796 pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state)); 797 798 switch (frontend_state) { 799 case XenbusStateInitialising: 800 if (dev->state == XenbusStateClosed) { 801 pr_info("%s: prepare for reconnect\n", dev->nodename); 802 xenbus_switch_state(dev, XenbusStateInitWait); 803 } 804 break; 805 806 case XenbusStateInitialised: 807 case XenbusStateConnected: 808 /* 809 * Ensure we connect even when two watches fire in 810 * close succession and we miss the intermediate value 811 * of frontend_state. 812 */ 813 if (dev->state == XenbusStateConnected) 814 break; 815 816 /* 817 * Enforce precondition before potential leak point. 818 * xen_blkif_disconnect() is idempotent. 819 */ 820 err = xen_blkif_disconnect(be->blkif); 821 if (err) { 822 xenbus_dev_fatal(dev, err, "pending I/O"); 823 break; 824 } 825 826 err = connect_ring(be); 827 if (err) { 828 /* 829 * Clean up so that memory resources can be used by 830 * other devices. connect_ring reported already error. 831 */ 832 xen_blkif_disconnect(be->blkif); 833 break; 834 } 835 xen_update_blkif_status(be->blkif); 836 break; 837 838 case XenbusStateClosing: 839 xenbus_switch_state(dev, XenbusStateClosing); 840 break; 841 842 case XenbusStateClosed: 843 xen_blkif_disconnect(be->blkif); 844 xenbus_switch_state(dev, XenbusStateClosed); 845 if (xenbus_dev_is_online(dev)) 846 break; 847 /* fall through */ 848 /* if not online */ 849 case XenbusStateUnknown: 850 /* implies xen_blkif_disconnect() via xen_blkbk_remove() */ 851 device_unregister(&dev->dev); 852 break; 853 854 default: 855 xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", 856 frontend_state); 857 break; 858 } 859 } 860 861 862 /* ** Connection ** */ 863 864 865 /* 866 * Write the physical details regarding the block device to the store, and 867 * switch to Connected state. 868 */ 869 static void connect(struct backend_info *be) 870 { 871 struct xenbus_transaction xbt; 872 int err; 873 struct xenbus_device *dev = be->dev; 874 875 pr_debug("%s %s\n", __func__, dev->otherend); 876 877 /* Supply the information about the device the frontend needs */ 878 again: 879 err = xenbus_transaction_start(&xbt); 880 if (err) { 881 xenbus_dev_fatal(dev, err, "starting transaction"); 882 return; 883 } 884 885 /* If we can't advertise it is OK. */ 886 xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support); 887 888 xen_blkbk_discard(xbt, be); 889 890 xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support); 891 892 err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); 893 if (err) { 894 xenbus_dev_fatal(dev, err, "writing %s/feature-persistent", 895 dev->nodename); 896 goto abort; 897 } 898 899 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", 900 (unsigned long long)vbd_sz(&be->blkif->vbd)); 901 if (err) { 902 xenbus_dev_fatal(dev, err, "writing %s/sectors", 903 dev->nodename); 904 goto abort; 905 } 906 907 /* FIXME: use a typename instead */ 908 err = xenbus_printf(xbt, dev->nodename, "info", "%u", 909 be->blkif->vbd.type | 910 (be->blkif->vbd.readonly ? VDISK_READONLY : 0)); 911 if (err) { 912 xenbus_dev_fatal(dev, err, "writing %s/info", 913 dev->nodename); 914 goto abort; 915 } 916 err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu", 917 (unsigned long) 918 bdev_logical_block_size(be->blkif->vbd.bdev)); 919 if (err) { 920 xenbus_dev_fatal(dev, err, "writing %s/sector-size", 921 dev->nodename); 922 goto abort; 923 } 924 err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u", 925 bdev_physical_block_size(be->blkif->vbd.bdev)); 926 if (err) 927 xenbus_dev_error(dev, err, "writing %s/physical-sector-size", 928 dev->nodename); 929 930 err = xenbus_transaction_end(xbt, 0); 931 if (err == -EAGAIN) 932 goto again; 933 if (err) 934 xenbus_dev_fatal(dev, err, "ending transaction"); 935 936 err = xenbus_switch_state(dev, XenbusStateConnected); 937 if (err) 938 xenbus_dev_fatal(dev, err, "%s: switching to Connected state", 939 dev->nodename); 940 941 return; 942 abort: 943 xenbus_transaction_end(xbt, 1); 944 } 945 946 /* 947 * Each ring may have multi pages, depends on "ring-page-order". 948 */ 949 static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) 950 { 951 unsigned int ring_ref[XENBUS_MAX_RING_GRANTS]; 952 struct pending_req *req, *n; 953 int err, i, j; 954 struct xen_blkif *blkif = ring->blkif; 955 struct xenbus_device *dev = blkif->be->dev; 956 unsigned int nr_grefs, evtchn; 957 958 err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u", 959 &evtchn); 960 if (err != 1) { 961 err = -EINVAL; 962 xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir); 963 return err; 964 } 965 966 nr_grefs = blkif->nr_ring_pages; 967 968 if (unlikely(!nr_grefs)) { 969 WARN_ON(true); 970 return -EINVAL; 971 } 972 973 for (i = 0; i < nr_grefs; i++) { 974 char ring_ref_name[RINGREF_NAME_LEN]; 975 976 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); 977 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name, 978 "%u", &ring_ref[i]); 979 980 if (err != 1) { 981 if (nr_grefs == 1) 982 break; 983 984 err = -EINVAL; 985 xenbus_dev_fatal(dev, err, "reading %s/%s", 986 dir, ring_ref_name); 987 return err; 988 } 989 } 990 991 if (err != 1) { 992 WARN_ON(nr_grefs != 1); 993 994 err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", 995 &ring_ref[0]); 996 if (err != 1) { 997 err = -EINVAL; 998 xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir); 999 return err; 1000 } 1001 } 1002 1003 err = -ENOMEM; 1004 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { 1005 req = kzalloc(sizeof(*req), GFP_KERNEL); 1006 if (!req) 1007 goto fail; 1008 list_add_tail(&req->free_list, &ring->pending_free); 1009 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) { 1010 req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL); 1011 if (!req->segments[j]) 1012 goto fail; 1013 } 1014 for (j = 0; j < MAX_INDIRECT_PAGES; j++) { 1015 req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]), 1016 GFP_KERNEL); 1017 if (!req->indirect_pages[j]) 1018 goto fail; 1019 } 1020 } 1021 1022 /* Map the shared frame, irq etc. */ 1023 err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn); 1024 if (err) { 1025 xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn); 1026 goto fail; 1027 } 1028 1029 return 0; 1030 1031 fail: 1032 list_for_each_entry_safe(req, n, &ring->pending_free, free_list) { 1033 list_del(&req->free_list); 1034 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) { 1035 if (!req->segments[j]) 1036 break; 1037 kfree(req->segments[j]); 1038 } 1039 for (j = 0; j < MAX_INDIRECT_PAGES; j++) { 1040 if (!req->indirect_pages[j]) 1041 break; 1042 kfree(req->indirect_pages[j]); 1043 } 1044 kfree(req); 1045 } 1046 return err; 1047 } 1048 1049 static int connect_ring(struct backend_info *be) 1050 { 1051 struct xenbus_device *dev = be->dev; 1052 struct xen_blkif *blkif = be->blkif; 1053 unsigned int pers_grants; 1054 char protocol[64] = ""; 1055 int err, i; 1056 char *xspath; 1057 size_t xspathsize; 1058 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ 1059 unsigned int requested_num_queues = 0; 1060 unsigned int ring_page_order; 1061 1062 pr_debug("%s %s\n", __func__, dev->otherend); 1063 1064 blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT; 1065 err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol", 1066 "%63s", protocol); 1067 if (err <= 0) 1068 strcpy(protocol, "unspecified, assuming default"); 1069 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) 1070 blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; 1071 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) 1072 blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; 1073 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) 1074 blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; 1075 else { 1076 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); 1077 return -ENOSYS; 1078 } 1079 pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent", 1080 0); 1081 blkif->vbd.feature_gnt_persistent = pers_grants; 1082 blkif->vbd.overflow_max_grants = 0; 1083 1084 /* 1085 * Read the number of hardware queues from frontend. 1086 */ 1087 requested_num_queues = xenbus_read_unsigned(dev->otherend, 1088 "multi-queue-num-queues", 1089 1); 1090 if (requested_num_queues > xenblk_max_queues 1091 || requested_num_queues == 0) { 1092 /* Buggy or malicious guest. */ 1093 xenbus_dev_fatal(dev, err, 1094 "guest requested %u queues, exceeding the maximum of %u.", 1095 requested_num_queues, xenblk_max_queues); 1096 return -ENOSYS; 1097 } 1098 blkif->nr_rings = requested_num_queues; 1099 if (xen_blkif_alloc_rings(blkif)) 1100 return -ENOMEM; 1101 1102 pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename, 1103 blkif->nr_rings, blkif->blk_protocol, protocol, 1104 pers_grants ? "persistent grants" : ""); 1105 1106 ring_page_order = xenbus_read_unsigned(dev->otherend, 1107 "ring-page-order", 0); 1108 1109 if (ring_page_order > xen_blkif_max_ring_order) { 1110 err = -EINVAL; 1111 xenbus_dev_fatal(dev, err, 1112 "requested ring page order %d exceed max:%d", 1113 ring_page_order, 1114 xen_blkif_max_ring_order); 1115 return err; 1116 } 1117 1118 blkif->nr_ring_pages = 1 << ring_page_order; 1119 1120 if (blkif->nr_rings == 1) 1121 return read_per_ring_refs(&blkif->rings[0], dev->otherend); 1122 else { 1123 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size; 1124 xspath = kmalloc(xspathsize, GFP_KERNEL); 1125 if (!xspath) { 1126 xenbus_dev_fatal(dev, -ENOMEM, "reading ring references"); 1127 return -ENOMEM; 1128 } 1129 1130 for (i = 0; i < blkif->nr_rings; i++) { 1131 memset(xspath, 0, xspathsize); 1132 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i); 1133 err = read_per_ring_refs(&blkif->rings[i], xspath); 1134 if (err) { 1135 kfree(xspath); 1136 return err; 1137 } 1138 } 1139 kfree(xspath); 1140 } 1141 return 0; 1142 } 1143 1144 static const struct xenbus_device_id xen_blkbk_ids[] = { 1145 { "vbd" }, 1146 { "" } 1147 }; 1148 1149 static struct xenbus_driver xen_blkbk_driver = { 1150 .ids = xen_blkbk_ids, 1151 .probe = xen_blkbk_probe, 1152 .remove = xen_blkbk_remove, 1153 .otherend_changed = frontend_changed, 1154 .allow_rebind = true, 1155 }; 1156 1157 int xen_blkif_xenbus_init(void) 1158 { 1159 return xenbus_register_backend(&xen_blkbk_driver); 1160 } 1161 1162 void xen_blkif_xenbus_fini(void) 1163 { 1164 xenbus_unregister_driver(&xen_blkbk_driver); 1165 } 1166