/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched/mm.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

/*
 * The minimal size of a segment supported by the block framework is PAGE_SIZE.
 * When Linux is using a different page size than Xen, it may not be possible
 * to put all the data in a single segment.
 * This can happen when the backend doesn't support indirect descriptors and
 * therefore the maximum amount of data that a request can carry is
 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
 *
 * Note that we only support one extra request. So the Linux page size
 * should be <= (2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
 * 88KB.
 */
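
/*
 * Worked example (assuming the usual 4KB Xen pages and the blkif limit of
 * 11 segments per request): a single request carries at most 11 * 4KB = 44KB,
 * and a request plus its single extra request carry 2 * 44KB = 88KB, which
 * is where the bounds quoted above come from.
 */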
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

enum blk_req_status {
	REQ_WAITING,
	REQ_DONE,
	REQ_ERROR,
	REQ_EOPNOTSUPP,
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
	unsigned int num_sg;
	enum blk_req_status status;

#define NO_ASSOCIATED_ID ~0UL
	/*
	 * Id of the sibling if we ever need 2 requests when handling a
	 * block I/O request
	 */
	unsigned long associated_id;
};

struct blkif_req {
	blk_status_t error;
};

static inline struct blkif_req *blkif_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
static struct delayed_work blkfront_work;
static LIST_HEAD(info_list);

/*
 * Maximum number of segments in indirect requests; the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */

static unsigned int xen_blkif_max_segments = 32;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
MODULE_PARM_DESC(max_indirect_segments,
		 "Maximum amount of segments in indirect requests (default is 32)");

static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend, 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

/*
 * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 * characters are enough. Define to 20 to keep consistent with backend.
 */
#define RINGREF_NAME_LEN (20)
/*
 * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
 */
#define QUEUE_NAME_LEN (17)

/*
 * Per-ring info.
 * Every blkfront device can associate with one or more blkfront_ring_info,
 * depending on how many hardware queues/rings are to be used.
 */
struct blkfront_ring_info {
	/* Lock to protect data in every ring buffer. */
	spinlock_t ring_lock;
	struct blkif_front_ring ring;
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	unsigned int evtchn, irq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct list_head indirect_pages;
	struct list_head grants;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	struct blkfront_info *dev_info;
	struct blk_shadow shadow[];
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	u16 sector_size;
	unsigned int physical_sector_size;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	/* Number of pages per ring buffer. */
	unsigned int nr_ring_pages;
	struct request_queue *rq;
	unsigned int feature_flush:1;
	unsigned int feature_fua:1;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int feature_persistent:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	/* Number of 4KB segments handled */
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
	struct blkfront_ring_info *rinfo;
	unsigned int nr_rings;
	unsigned int rinfo_size;
	/* Save uncompleted reqs and bios for migration. */
	struct list_head requests;
	struct bio_list bio_list;
	struct list_head info_list;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

/*
 * Grants are always the same size as a Xen page (i.e. 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment
 */
#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)

#define GRANTS_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))

#define INDIRECT_GREFS(_grants)		\
	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
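
/*
 * Worked example (assuming 4KB Xen pages and the usual 8-byte blkif segment
 * descriptor): a guest built with 64KB pages has GRANTS_PER_PSEG == 16, an
 * indirect frame holds GRANTS_PER_INDIRECT_FRAME == 512 grants, and a
 * request needing e.g. 1000 grants therefore uses INDIRECT_GREFS(1000) == 2
 * indirect frames.
 */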

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);

#define for_each_rinfo(info, ptr, idx)		\
	for ((ptr) = (info)->rinfo, (idx) = 0;	\
	     (idx) < (info)->nr_rings;		\
	     (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)

static inline struct blkfront_ring_info *
get_rinfo(const struct blkfront_info *info, unsigned int i)
{
	BUG_ON(i >= info->nr_rings);
	return (void *)info->rinfo + i * info->rinfo_size;
}

static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
	unsigned long free = rinfo->shadow_free;

	BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
			      unsigned long id)
{
	if (rinfo->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (rinfo->shadow[id].request == NULL)
		return -EINVAL;
	rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
	rinfo->shadow[id].request = NULL;
	rinfo->shadow_free = id;
	return 0;
}

static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->page = granted_page;
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &rinfo->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &rinfo->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry;

	BUG_ON(list_empty(&rinfo->grants));
	gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		rinfo->persistent_gnts_c--;

	return gnt_list_entry;
}

static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{
	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
						 info->xbdev->otherend_id,
						 gnt_list_entry->page,
						 0);
}

static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
						info->xbdev->otherend_id,
						gfn, 0);
	}

	return gnt_list_entry;
}

static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
		BUG_ON(list_empty(&rinfo->indirect_pages));
		indirect_page = list_first_entry(&rinfo->indirect_pages,
						 struct page, lru);
		list_del(&indirect_page->lru);
		gnt_list_entry->page = indirect_page;
	}
	grant_foreign_access(gnt_list_entry, info);

	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
	schedule_work(&rinfo->work);
}
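
/*
 * For instance (illustrative numbers only), a 16 GiB virtual disk has
 * 33554432 512-byte sectors and, with the fake geometry below of 255 heads
 * and 63 sectors per track, reports 33554432 / (255 * 63) = 2088 cylinders.
 */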
static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
					    struct request *req,
					    struct blkif_request **ring_req)
{
	unsigned long id;

	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
	rinfo->ring.req_prod_pvt++;

	id = get_id_from_freelist(rinfo);
	rinfo->shadow[id].request = req;
	rinfo->shadow[id].status = REQ_WAITING;
	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

	(*ring_req)->u.rw.id = id;

	return id;
}

static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req;
	unsigned long id;

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;

	return 0;
}

struct setup_rw_req {
	unsigned int grant_idx;
	struct blkif_request_segment *segments;
	struct blkfront_ring_info *rinfo;
	struct blkif_request *ring_req;
	grant_ref_t gref_head;
	unsigned int id;
	/* Only used when persistent grant is used and it's a read request */
	bool need_copy;
	unsigned int bvec_off;
	char *bvec_data;

	bool require_extra_req;
	struct blkif_request *extra_ring_req;
};

static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
				     unsigned int len, void *data)
{
	struct setup_rw_req *setup = data;
	int n, ref;
	struct grant *gnt_list_entry;
	unsigned int fsect, lsect;
	/* Convenient aliases */
	unsigned int grant_idx = setup->grant_idx;
	struct blkif_request *ring_req = setup->ring_req;
	struct blkfront_ring_info *rinfo = setup->rinfo;
	/*
	 * We always use the shadow of the first request to store the list
	 * of grants associated with the block I/O request. This makes the
	 * completion easier to handle even if the block I/O request is
	 * split.
	 */
	struct blk_shadow *shadow = &rinfo->shadow[setup->id];

	if (unlikely(setup->require_extra_req &&
		     grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		/*
		 * We are using the second request, set up grant_idx
		 * to be the index of the segment array.
		 */
		grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
		ring_req = setup->extra_ring_req;
	}

	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
		if (setup->segments)
			kunmap_atomic(setup->segments);

		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
		gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
		shadow->indirect_grants[n] = gnt_list_entry;
		setup->segments = kmap_atomic(gnt_list_entry->page);
		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
	}

	gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
	ref = gnt_list_entry->gref;
	/*
	 * All the grants are stored in the shadow of the first
	 * request. Therefore we have to use the global index.
	 */
	shadow->grants_used[setup->grant_idx] = gnt_list_entry;

	if (setup->need_copy) {
		void *shared_data;

		shared_data = kmap_atomic(gnt_list_entry->page);
		/*
		 * this does not wipe data stored outside the
		 * range sg->offset..sg->offset+sg->length.
		 * Therefore, blkback *could* see data from
		 * previous requests. This is OK as long as
		 * persistent grants are shared with just one
		 * domain. It may need refactoring if this
		 * changes
		 */
		memcpy(shared_data + offset,
		       setup->bvec_data + setup->bvec_off,
		       len);

		kunmap_atomic(shared_data);
		setup->bvec_off += len;
	}

	fsect = offset >> 9;
	lsect = fsect + (len >> 9) - 1;
	if (ring_req->operation != BLKIF_OP_INDIRECT) {
		ring_req->u.rw.seg[grant_idx] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	} else {
		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	}

	(setup->grant_idx)++;
}

static void blkif_setup_extra_req(struct blkif_request *first,
				  struct blkif_request *second)
{
	uint16_t nr_segments = first->u.rw.nr_segments;

	/*
	 * The second request is only present when the first request uses
	 * all its segments. It is always a continuation of the first one.
	 */
	first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
	second->u.rw.sector_number = first->u.rw.sector_number +
		(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;

	second->u.rw.handle = first->u.rw.handle;
	second->operation = first->operation;
}
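
/*
 * Example of the split path (assuming 4KB Xen pages, 64KB Linux pages and a
 * backend without indirect descriptors): a single 64KB segment already needs
 * 16 grants, which exceeds BLKIF_MAX_SEGMENTS_PER_REQUEST, so
 * blkif_queue_rw_req() below emits two ring requests and links their shadow
 * entries via associated_id.
 */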
static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req, *extra_ring_req = NULL;
	unsigned long id, extra_id = NO_ASSOCIATED_ID;
	bool require_extra_req = false;
	int i;
	struct setup_rw_req setup = {
		.grant_idx = 0,
		.segments = NULL,
		.rinfo = rinfo,
		.need_copy = rq_data_dir(req) && info->feature_persistent,
	};

	/*
	 * Used to store whether we are able to queue the request by just
	 * using existing persistent grants, or whether we have to get new
	 * grants, as there are not sufficiently many free.
	 */
	bool new_persistent_gnts = false;
	struct scatterlist *sg;
	int num_sg, max_grefs, num_grant;

	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);

	/* Check if we have enough persistent grants to allocate a request */
	if (rinfo->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = true;

		if (gnttab_alloc_grant_references(
		    max_grefs - rinfo->persistent_gnts_c,
		    &setup.gref_head) < 0) {
			gnttab_request_free_callback(
				&rinfo->callback,
				blkif_restart_queue_callback,
				rinfo,
				max_grefs - rinfo->persistent_gnts_c);
			return 1;
		}
	}

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
	num_grant = 0;
	/* Calculate the number of grants used */
	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
		num_grant += gnttab_count_grant(sg->offset, sg->length);

	require_extra_req = info->max_indirect_segments == 0 &&
		num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
	BUG_ON(!HAS_EXTRA_REQ && require_extra_req);

	rinfo->shadow[id].num_sg = num_sg;
	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
	    likely(!require_extra_req)) {
		/*
		 * The indirect operation can only be a BLKIF_OP_READ or
		 * BLKIF_OP_WRITE
		 */
		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
		ring_req->operation = BLKIF_OP_INDIRECT;
		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.indirect.handle = info->handle;
		ring_req->u.indirect.nr_segments = num_grant;
	} else {
		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.rw.handle = info->handle;
		ring_req->operation = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
			/*
			 * Ideally we can do an unordered flush-to-disk.
			 * In case the backend only supports barriers, use that.
			 * A barrier request is a superset of FUA, so we can
			 * implement it the same way.  (It's also a FLUSH+FUA,
			 * since it is guaranteed ordered WRT previous writes.)
			 */
			if (info->feature_flush && info->feature_fua)
				ring_req->operation =
					BLKIF_OP_WRITE_BARRIER;
			else if (info->feature_flush)
				ring_req->operation =
					BLKIF_OP_FLUSH_DISKCACHE;
			else
				ring_req->operation = 0;
		}
		ring_req->u.rw.nr_segments = num_grant;
		if (unlikely(require_extra_req)) {
			extra_id = blkif_ring_get_request(rinfo, req,
							  &extra_ring_req);
			/*
			 * Only the first request contains the scatter-gather
			 * list.
			 */
			rinfo->shadow[extra_id].num_sg = 0;

			blkif_setup_extra_req(ring_req, extra_ring_req);

			/* Link the 2 requests together */
			rinfo->shadow[extra_id].associated_id = id;
			rinfo->shadow[id].associated_id = extra_id;
		}
	}

	setup.ring_req = ring_req;
	setup.id = id;

	setup.require_extra_req = require_extra_req;
	if (unlikely(require_extra_req))
		setup.extra_ring_req = extra_ring_req;

	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
		BUG_ON(sg->offset + sg->length > PAGE_SIZE);

		if (setup.need_copy) {
			setup.bvec_off = sg->offset;
			setup.bvec_data = kmap_atomic(sg_page(sg));
		}

		gnttab_foreach_grant_in_range(sg_page(sg),
					      sg->offset,
					      sg->length,
					      blkif_setup_rw_req_grant,
					      &setup);

		if (setup.need_copy)
			kunmap_atomic(setup.bvec_data);
	}
	if (setup.segments)
		kunmap_atomic(setup.segments);

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;
	if (unlikely(require_extra_req))
		rinfo->shadow[extra_id].req = *extra_ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(setup.gref_head);

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
{
	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
		     req_op(req) == REQ_OP_SECURE_ERASE))
		return blkif_queue_discard_req(req, rinfo);
	else
		return blkif_queue_rw_req(req, rinfo);
}

static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);

	if (notify)
		notify_remote_via_irq(rinfo->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return (blk_rq_is_passthrough(req) ||
		((req_op(req) == REQ_OP_FLUSH) &&
		 !info->feature_flush) ||
		((req->cmd_flags & REQ_FUA) &&
		 !info->feature_fua));
}

static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *qd)
{
	unsigned long flags;
	int qid = hctx->queue_num;
	struct blkfront_info *info = hctx->queue->queuedata;
	struct blkfront_ring_info *rinfo = NULL;

	rinfo = get_rinfo(info, qid);
	blk_mq_start_request(qd->rq);
	spin_lock_irqsave(&rinfo->ring_lock, flags);
	if (RING_FULL(&rinfo->ring))
		goto out_busy;

	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
		goto out_err;

	if (blkif_queue_request(qd->rq, rinfo))
		goto out_busy;

	flush_requests(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_STS_OK;

out_err:
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_STS_IOERR;

out_busy:
	blk_mq_stop_hw_queue(hctx);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_STS_DEV_RESOURCE;
}

static void blkif_complete_rq(struct request *rq)
{
	blk_mq_end_request(rq, blkif_req(rq)->error);
}

static const struct blk_mq_ops blkfront_mq_ops = {
	.queue_rq = blkif_queue_rq,
	.complete = blkif_complete_rq,
};

static void blkif_set_queue_limits(struct blkfront_info *info)
{
	struct request_queue *rq = info->rq;
	struct gendisk *gd = info->gd;
	unsigned int segments = info->max_indirect_segments ? :
				BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, info->sector_size);
	blk_queue_physical_block_size(rq, info->physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	memset(&info->tag_set, 0, sizeof(info->tag_set));
	info->tag_set.ops = &blkfront_mq_ops;
	info->tag_set.nr_hw_queues = info->nr_rings;
	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
		/*
		 * When indirect descriptors are not supported, the I/O
		 * request will be split between multiple requests in the
		 * ring. To avoid problems when sending the request, divide
		 * the depth of the queue by 2.
		 */
		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
	} else
		info->tag_set.queue_depth = BLK_RING_SIZE(info);
	info->tag_set.numa_node = NUMA_NO_NODE;
	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	info->tag_set.cmd_size = sizeof(struct blkif_req);
	info->tag_set.driver_data = info;

	if (blk_mq_alloc_tag_set(&info->tag_set))
		return -EINVAL;
	rq = blk_mq_init_queue(&info->tag_set);
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&info->tag_set);
		return PTR_ERR(rq);
	}

	rq->queuedata = info;
	info->rq = gd->queue = rq;
	info->gd = gd;
	info->sector_size = sector_size;
	info->physical_sector_size = physical_sector_size;
	blkif_set_queue_limits(info);

	return 0;
}
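
/*
 * Sizing example (assuming a single 4KB ring page, which with the blkif ring
 * layout works out to 32 ring entries): the tag set depth above is 32, or 16
 * when indirect descriptors are missing and requests may be split in two, so
 * the block layer can never hand us more requests than the ring has slots.
 */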
static const char *flush_info(struct blkfront_info *info)
{
	if (info->feature_flush && info->feature_fua)
		return "barrier: enabled;";
	else if (info->feature_flush)
		return "flush diskcache: enabled;";
	else
		return "barrier or flush: disabled;";
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
			      info->feature_fua ? true : false);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;
	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
			       "emulated IDE disks,\n\t choose an xvd device name "
			       "from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

release:
	xlbd_release_minors(minor, nr_minors);
out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors, i;
	struct blkfront_ring_info *rinfo;

	if (info->rq == NULL)
		return;

	/* No more blkif_request(). */
	blk_mq_stop_hw_queues(info->rq);

	for_each_rinfo(info, rinfo, i) {
		/* No more gnttab callback work. */
		gnttab_cancel_free_callback(&rinfo->callback);

		/* Flush gnttab callback work. Must be done with no locks held. */
		flush_work(&rinfo->work);
	}

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	blk_mq_free_tag_set(&info->tag_set);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

/* Already hold rinfo->ring_lock. */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{
	if (!RING_FULL(&rinfo->ring))
		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
}

static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&rinfo->ring_lock, flags);
	kick_pending_request_queues_locked(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);

	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(rinfo);
}

static void blkif_free_ring(struct blkfront_ring_info *rinfo)
{
	struct grant *persistent_gnt, *n;
	struct blkfront_info *info = rinfo->dev_info;
	int i, j, segs;

	/*
	 * Remove indirect pages, this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	/* Remove all persistent grants. */
	if (!list_empty(&rinfo->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &rinfo->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				rinfo->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
	}
	BUG_ON(rinfo->persistent_gnts_c != 0);

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!rinfo->shadow[i].request)
			goto free_shadow;

		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       rinfo->shadow[i].req.u.indirect.nr_segments :
		       rinfo->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = rinfo->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = rinfo->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

free_shadow:
		kvfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kvfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
		kvfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&rinfo->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&rinfo->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
		if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
			gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
			rinfo->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
	rinfo->ring.sring = NULL;

	if (rinfo->irq)
		unbind_from_irqhandler(rinfo->irq, rinfo);
	rinfo->evtchn = rinfo->irq = 0;
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	unsigned int i;
	struct blkfront_ring_info *rinfo;

	/* Prevent new requests being issued until we fix things up. */
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_mq_stop_hw_queues(info->rq);

	for_each_rinfo(info, rinfo, i)
		blkif_free_ring(rinfo);

	kvfree(info->rinfo);
	info->rinfo = NULL;
	info->nr_rings = 0;
}

struct copy_from_grant {
	const struct blk_shadow *s;
	unsigned int grant_idx;
	unsigned int bvec_offset;
	char *bvec_data;
};

static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct copy_from_grant *info = data;
	char *shared_data;
	/* Convenient aliases */
	const struct blk_shadow *s = info->s;

	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);

	memcpy(info->bvec_data + info->bvec_offset,
	       shared_data + offset, len);

	info->bvec_offset += len;
	info->grant_idx++;

	kunmap_atomic(shared_data);
}

static enum blk_req_status blkif_rsp_to_req_status(int rsp)
{
	switch (rsp) {
	case BLKIF_RSP_OKAY:
		return REQ_DONE;
	case BLKIF_RSP_EOPNOTSUPP:
		return REQ_EOPNOTSUPP;
	case BLKIF_RSP_ERROR:
	default:
		return REQ_ERROR;
	}
}

/*
 * Get the final status of the block request based on the two ring responses
 */
static int blkif_get_final_status(enum blk_req_status s1,
				  enum blk_req_status s2)
{
	BUG_ON(s1 == REQ_WAITING);
	BUG_ON(s2 == REQ_WAITING);

	if (s1 == REQ_ERROR || s2 == REQ_ERROR)
		return BLKIF_RSP_ERROR;
	else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
		return BLKIF_RSP_EOPNOTSUPP;
	return BLKIF_RSP_OKAY;
}

static bool blkif_completion(unsigned long *id,
			     struct blkfront_ring_info *rinfo,
			     struct blkif_response *bret)
{
	int i = 0;
	struct scatterlist *sg;
	int num_sg, num_grant;
	struct blkfront_info *info = rinfo->dev_info;
	struct blk_shadow *s = &rinfo->shadow[*id];
	struct copy_from_grant data = {
		.grant_idx = 0,
	};

	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;

	/* The I/O request may be split in two. */
	if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
		struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];

		/* Keep the status of the current response in shadow. */
		s->status = blkif_rsp_to_req_status(bret->status);

		/* Wait for the second response if it is not here yet. */
		if (s2->status == REQ_WAITING)
			return false;

		bret->status = blkif_get_final_status(s->status,
						      s2->status);

		/*
		 * All the grants are stored in the first shadow in order
		 * to make the completion code simpler.
		 */
		num_grant += s2->req.u.rw.nr_segments;

		/*
		 * The two responses may not come in order. Only the
		 * first request will store the scatter-gather list.
		 */
		if (s2->num_sg != 0) {
			/* Update "id" with the ID of the first response. */
			*id = s->associated_id;
			s = s2;
		}

		/*
		 * We don't need the second request anymore, so recycle
		 * it now.
		 */
		if (add_id_to_freelist(rinfo, s->associated_id))
			WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
			     info->gd->disk_name, s->associated_id);
	}

	data.s = s;
	num_sg = s->num_sg;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		for_each_sg(s->sg, sg, num_sg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);

			data.bvec_offset = sg->offset;
			data.bvec_data = kmap_atomic(sg_page(sg));

			gnttab_foreach_grant_in_range(sg_page(sg),
						      sg->offset,
						      sg->length,
						      blkif_copy_from_grant,
						      &data);

			kunmap_atomic(data.bvec_data);
		}
	}
	/* Add the persistent grant into the list of free grants */
	for (i = 0; i < num_grant; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &rinfo->grants);
			rinfo->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
				rinfo->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
					indirect_page = s->indirect_grants[i]->page;
					list_add(&indirect_page->lru, &rinfo->indirect_pages);
				}
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
			}
		}
	}

	return true;
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
	struct blkfront_info *info = rinfo->dev_info;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&rinfo->ring_lock, flags);
again:
	rp = rinfo->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&rinfo->ring, i);
		id = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE(info)) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req = rinfo->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD) {
			/*
			 * We may need to wait for an extra response if the
			 * I/O request is split in 2
			 */
			if (!blkif_completion(&id, rinfo, bret))
				continue;
		}

		if (add_id_to_freelist(rinfo, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		if (bret->status == BLKIF_RSP_OKAY)
			blkif_req(req)->error = BLK_STS_OK;
		else
			blkif_req(req)->error = BLK_STS_IOERR;

		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
					   info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
			}
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
			}
			if (unlikely(blkif_req(req)->error)) {
				if (blkif_req(req)->error == BLK_STS_NOTSUPP)
					blkif_req(req)->error = BLK_STS_OK;
				info->feature_fua = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			fallthrough;
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			break;
		default:
			BUG();
		}

		if (likely(!blk_should_fake_timeout(req->q)))
			blk_mq_complete_request(req);
	}

	rinfo->ring.rsp_cons = i;

	if (i != rinfo->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		rinfo->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues_locked(rinfo);

	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

	return IRQ_HANDLED;
}


static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_ring_info *rinfo)
{
	struct blkif_sring *sring;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;
	unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
	grant_ref_t gref[XENBUS_MAX_RING_GRANTS];

	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
						       get_order(ring_size));
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&rinfo->ring, sring, ring_size);

	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
	if (err < 0) {
		free_pages((unsigned long)sring, get_order(ring_size));
		rinfo->ring.sring = NULL;
		goto fail;
	}
	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = gref[i];

	err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
					"blkif", rinfo);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	rinfo->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/*
 * Write out per-ring/queue nodes including ring-ref and event-channel; each
 * ring buffer may have multiple pages depending on ->nr_ring_pages.
 */
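/*
 * For example (hypothetical values), a ring with nr_ring_pages == 2 under
 * the xenstore directory ".../queue-0" ends up with the nodes
 * queue-0/ring-ref0, queue-0/ring-ref1 and queue-0/event-channel.
 */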
static int write_per_ring_nodes(struct xenbus_transaction xbt,
				struct blkfront_ring_info *rinfo, const char *dir)
{
	int err;
	unsigned int i;
	const char *message = NULL;
	struct blkfront_info *info = rinfo->dev_info;

	if (info->nr_ring_pages == 1) {
		err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
		if (err) {
			message = "writing ring-ref";
			goto abort_transaction;
		}
	} else {
		for (i = 0; i < info->nr_ring_pages; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_printf(xbt, dir, ring_ref_name,
					    "%u", rinfo->ring_ref[i]);
			if (err) {
				message = "writing ring-ref";
				goto abort_transaction;
			}
		}
	}

	err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(info->xbdev, err, "%s", message);

	return err;
}

static void free_info(struct blkfront_info *info)
{
	list_del(&info->info_list);
	kfree(info);
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;
	unsigned int i, max_page_order;
	unsigned int ring_page_order;
	struct blkfront_ring_info *rinfo;

	if (!info)
		return -ENODEV;

	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
					      "max-ring-page-order", 0);
	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
	info->nr_ring_pages = 1 << ring_page_order;

	err = negotiate_mq(info);
	if (err)
		goto destroy_blkring;

	for_each_rinfo(info, rinfo, i) {
		/* Create shared ring, alloc event channel. */
		err = setup_blkring(dev, rinfo);
		if (err)
			goto destroy_blkring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	if (info->nr_ring_pages > 1) {
		err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
				    ring_page_order);
		if (err) {
			message = "writing ring-page-order";
			goto abort_transaction;
		}
	}

	/* We already got the number of queues/rings in _probe */
	if (info->nr_rings == 1) {
		err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
		if (err)
			goto destroy_blkring;
	} else {
		char *path;
		size_t pathsize;

		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
				    info->nr_rings);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction;
		}

		pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
		path = kmalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "ENOMEM while writing ring references";
			goto abort_transaction;
		}

		for_each_rinfo(info, rinfo, i) {
			memset(path, 0, pathsize);
			snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
			err = write_per_ring_nodes(xbt, rinfo, path);
			if (err) {
				kfree(path);
				goto destroy_blkring;
			}
		}
		kfree(path);
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
			info->feature_persistent);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	for_each_rinfo(info, rinfo, i) {
		unsigned int j;

		for (j = 0; j < BLK_RING_SIZE(info); j++)
			rinfo->shadow[j].req.u.rw.id = j + 1;
		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
	}
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
destroy_blkring:
	blkif_free(info, 0);

	mutex_lock(&blkfront_mutex);
	free_info(info);
	mutex_unlock(&blkfront_mutex);

	dev_set_drvdata(&dev->dev, NULL);

	return err;
}

static int negotiate_mq(struct blkfront_info *info)
{
	unsigned int backend_max_queues;
	unsigned int i;
	struct blkfront_ring_info *rinfo;

	BUG_ON(info->nr_rings);

	/* Check if backend supports multiple queues. */
	backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
						  "multi-queue-max-queues", 1);
	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
	/* We need at least one ring. */
*/ 1923 if (!info->nr_rings) 1924 info->nr_rings = 1; 1925 1926 info->rinfo_size = struct_size(info->rinfo, shadow, 1927 BLK_RING_SIZE(info)); 1928 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL); 1929 if (!info->rinfo) { 1930 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); 1931 info->nr_rings = 0; 1932 return -ENOMEM; 1933 } 1934 1935 for_each_rinfo(info, rinfo, i) { 1936 INIT_LIST_HEAD(&rinfo->indirect_pages); 1937 INIT_LIST_HEAD(&rinfo->grants); 1938 rinfo->dev_info = info; 1939 INIT_WORK(&rinfo->work, blkif_restart_queue); 1940 spin_lock_init(&rinfo->ring_lock); 1941 } 1942 return 0; 1943 } 1944 1945 /* Enable the persistent grants feature. */ 1946 static bool feature_persistent = true; 1947 module_param(feature_persistent, bool, 0644); 1948 MODULE_PARM_DESC(feature_persistent, 1949 "Enables the persistent grants feature"); 1950 1951 /** 1952 * Entry point to this code when a new device is created. Allocate the basic 1953 * structures and the ring buffer for communication with the backend, and 1954 * inform the backend of the appropriate details for those. Switch to 1955 * Initialised state. 1956 */ 1957 static int blkfront_probe(struct xenbus_device *dev, 1958 const struct xenbus_device_id *id) 1959 { 1960 int err, vdevice; 1961 struct blkfront_info *info; 1962 1963 /* FIXME: Use dynamic device id if this is not set. */ 1964 err = xenbus_scanf(XBT_NIL, dev->nodename, 1965 "virtual-device", "%i", &vdevice); 1966 if (err != 1) { 1967 /* go looking in the extended area instead */ 1968 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", 1969 "%i", &vdevice); 1970 if (err != 1) { 1971 xenbus_dev_fatal(dev, err, "reading virtual-device"); 1972 return err; 1973 } 1974 } 1975 1976 if (xen_hvm_domain()) { 1977 char *type; 1978 int len; 1979 /* no unplug has been done: do not hook devices != xen vbds */ 1980 if (xen_has_pv_and_legacy_disk_devices()) { 1981 int major; 1982 1983 if (!VDEV_IS_EXTENDED(vdevice)) 1984 major = BLKIF_MAJOR(vdevice); 1985 else 1986 major = XENVBD_MAJOR; 1987 1988 if (major != XENVBD_MAJOR) { 1989 printk(KERN_INFO 1990 "%s: HVM does not support vbd %d as xen block device\n", 1991 __func__, vdevice); 1992 return -ENODEV; 1993 } 1994 } 1995 /* do not create a PV cdrom device if we are an HVM guest */ 1996 type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len); 1997 if (IS_ERR(type)) 1998 return -ENODEV; 1999 if (strncmp(type, "cdrom", 5) == 0) { 2000 kfree(type); 2001 return -ENODEV; 2002 } 2003 kfree(type); 2004 } 2005 info = kzalloc(sizeof(*info), GFP_KERNEL); 2006 if (!info) { 2007 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); 2008 return -ENOMEM; 2009 } 2010 2011 info->xbdev = dev; 2012 2013 mutex_init(&info->mutex); 2014 info->vdevice = vdevice; 2015 info->connected = BLKIF_STATE_DISCONNECTED; 2016 2017 info->feature_persistent = feature_persistent; 2018 2019 /* Front end dir is a number, which is used as the id. 
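 *
 * For example (the id is illustrative), a frontend node named
 * "device/vbd/51712" yields info->handle = 51712; only the component
 * after the last '/' is parsed.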
*/ 2020 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); 2021 dev_set_drvdata(&dev->dev, info); 2022 2023 mutex_lock(&blkfront_mutex); 2024 list_add(&info->info_list, &info_list); 2025 mutex_unlock(&blkfront_mutex); 2026 2027 return 0; 2028 } 2029 2030 static int blkif_recover(struct blkfront_info *info) 2031 { 2032 unsigned int r_index; 2033 struct request *req, *n; 2034 int rc; 2035 struct bio *bio; 2036 unsigned int segs; 2037 struct blkfront_ring_info *rinfo; 2038 2039 blkfront_gather_backend_features(info); 2040 /* Reset limits changed by blk_mq_update_nr_hw_queues(). */ 2041 blkif_set_queue_limits(info); 2042 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; 2043 blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); 2044 2045 for_each_rinfo(info, rinfo, r_index) { 2046 rc = blkfront_setup_indirect(rinfo); 2047 if (rc) 2048 return rc; 2049 } 2050 xenbus_switch_state(info->xbdev, XenbusStateConnected); 2051 2052 /* Now safe for us to use the shared ring */ 2053 info->connected = BLKIF_STATE_CONNECTED; 2054 2055 for_each_rinfo(info, rinfo, r_index) { 2056 /* Kick any other new requests queued since we resumed */ 2057 kick_pending_request_queues(rinfo); 2058 } 2059 2060 list_for_each_entry_safe(req, n, &info->requests, queuelist) { 2061 /* Requeue pending requests (flush or discard) */ 2062 list_del_init(&req->queuelist); 2063 BUG_ON(req->nr_phys_segments > segs); 2064 blk_mq_requeue_request(req, false); 2065 } 2066 blk_mq_start_stopped_hw_queues(info->rq, true); 2067 blk_mq_kick_requeue_list(info->rq); 2068 2069 while ((bio = bio_list_pop(&info->bio_list)) != NULL) { 2070 /* Traverse the list of pending bios and re-queue them */ 2071 submit_bio(bio); 2072 } 2073 2074 return 0; 2075 } 2076 2077 /** 2078 * We are reconnecting to the backend, due to a suspend/resume, or a backend 2079 * driver restart. We tear down our blkif structure and recreate it, but 2080 * leave the device-layer structures intact so that this is transparent to the 2081 * rest of the kernel. 2082 */ 2083 static int blkfront_resume(struct xenbus_device *dev) 2084 { 2085 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2086 int err = 0; 2087 unsigned int i, j; 2088 struct blkfront_ring_info *rinfo; 2089 2090 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 2091 2092 bio_list_init(&info->bio_list); 2093 INIT_LIST_HEAD(&info->requests); 2094 for_each_rinfo(info, rinfo, i) { 2095 struct bio_list merge_bio; 2096 struct blk_shadow *shadow = rinfo->shadow; 2097 2098 for (j = 0; j < BLK_RING_SIZE(info); j++) { 2099 /* Not in use? */ 2100 if (!shadow[j].request) 2101 continue; 2102 2103 /* 2104 * Get the bios in the request so we can re-queue them. 2105 */ 2106 if (req_op(shadow[j].request) == REQ_OP_FLUSH || 2107 req_op(shadow[j].request) == REQ_OP_DISCARD || 2108 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || 2109 shadow[j].request->cmd_flags & REQ_FUA) { 2110 /* 2111 * Flush operations don't contain bios, so 2112 * we need to requeue the whole request 2113 * 2114 * XXX: but this doesn't make any sense for a 2115 * write with the FUA flag set.. 
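 *
 * The resulting split is: requests that carry no usable bio list
 * (flush, discard, secure-erase and, per the note above, FUA writes)
 * are kept whole on info->requests and requeued with
 * blk_mq_requeue_request() from blkif_recover(); every other request
 * has its bios moved onto info->bio_list, is completed below, and the
 * bios are resubmitted with submit_bio() once the new rings are up.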
2116 */ 2117 list_add(&shadow[j].request->queuelist, &info->requests); 2118 continue; 2119 } 2120 merge_bio.head = shadow[j].request->bio; 2121 merge_bio.tail = shadow[j].request->biotail; 2122 bio_list_merge(&info->bio_list, &merge_bio); 2123 shadow[j].request->bio = NULL; 2124 blk_mq_end_request(shadow[j].request, BLK_STS_OK); 2125 } 2126 } 2127 2128 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2129 2130 err = talk_to_blkback(dev, info); 2131 if (!err) 2132 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); 2133 2134 /* 2135 * We have to wait for the backend to switch to 2136 * connected state, since we want to read which 2137 * features it supports. 2138 */ 2139 2140 return err; 2141 } 2142 2143 static void blkfront_closing(struct blkfront_info *info) 2144 { 2145 struct xenbus_device *xbdev = info->xbdev; 2146 struct block_device *bdev = NULL; 2147 2148 mutex_lock(&info->mutex); 2149 2150 if (xbdev->state == XenbusStateClosing) { 2151 mutex_unlock(&info->mutex); 2152 return; 2153 } 2154 2155 if (info->gd) 2156 bdev = bdget_disk(info->gd, 0); 2157 2158 mutex_unlock(&info->mutex); 2159 2160 if (!bdev) { 2161 xenbus_frontend_closed(xbdev); 2162 return; 2163 } 2164 2165 mutex_lock(&bdev->bd_mutex); 2166 2167 if (bdev->bd_openers) { 2168 xenbus_dev_error(xbdev, -EBUSY, 2169 "Device in use; refusing to close"); 2170 xenbus_switch_state(xbdev, XenbusStateClosing); 2171 } else { 2172 xlvbd_release_gendisk(info); 2173 xenbus_frontend_closed(xbdev); 2174 } 2175 2176 mutex_unlock(&bdev->bd_mutex); 2177 bdput(bdev); 2178 } 2179 2180 static void blkfront_setup_discard(struct blkfront_info *info) 2181 { 2182 int err; 2183 unsigned int discard_granularity; 2184 unsigned int discard_alignment; 2185 2186 info->feature_discard = 1; 2187 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2188 "discard-granularity", "%u", &discard_granularity, 2189 "discard-alignment", "%u", &discard_alignment, 2190 NULL); 2191 if (!err) { 2192 info->discard_granularity = discard_granularity; 2193 info->discard_alignment = discard_alignment; 2194 } 2195 info->feature_secdiscard = 2196 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure", 2197 0); 2198 } 2199 2200 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) 2201 { 2202 unsigned int psegs, grants, memflags; 2203 int err, i; 2204 struct blkfront_info *info = rinfo->dev_info; 2205 2206 memflags = memalloc_noio_save(); 2207 2208 if (info->max_indirect_segments == 0) { 2209 if (!HAS_EXTRA_REQ) 2210 grants = BLKIF_MAX_SEGMENTS_PER_REQUEST; 2211 else { 2212 /* 2213 * When an extra req is required, the maximum 2214 * grants supported is related to the size of the 2215 * Linux block segment. 
2216 */ 2217 grants = GRANTS_PER_PSEG; 2218 } 2219 } 2220 else 2221 grants = info->max_indirect_segments; 2222 psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG); 2223 2224 err = fill_grant_buffer(rinfo, 2225 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info)); 2226 if (err) 2227 goto out_of_memory; 2228 2229 if (!info->feature_persistent && info->max_indirect_segments) { 2230 /* 2231 * We are using indirect descriptors but not persistent 2232 * grants, we need to allocate a set of pages that can be 2233 * used for mapping indirect grefs 2234 */ 2235 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info); 2236 2237 BUG_ON(!list_empty(&rinfo->indirect_pages)); 2238 for (i = 0; i < num; i++) { 2239 struct page *indirect_page = alloc_page(GFP_KERNEL); 2240 if (!indirect_page) 2241 goto out_of_memory; 2242 list_add(&indirect_page->lru, &rinfo->indirect_pages); 2243 } 2244 } 2245 2246 for (i = 0; i < BLK_RING_SIZE(info); i++) { 2247 rinfo->shadow[i].grants_used = 2248 kvcalloc(grants, 2249 sizeof(rinfo->shadow[i].grants_used[0]), 2250 GFP_KERNEL); 2251 rinfo->shadow[i].sg = kvcalloc(psegs, 2252 sizeof(rinfo->shadow[i].sg[0]), 2253 GFP_KERNEL); 2254 if (info->max_indirect_segments) 2255 rinfo->shadow[i].indirect_grants = 2256 kvcalloc(INDIRECT_GREFS(grants), 2257 sizeof(rinfo->shadow[i].indirect_grants[0]), 2258 GFP_KERNEL); 2259 if ((rinfo->shadow[i].grants_used == NULL) || 2260 (rinfo->shadow[i].sg == NULL) || 2261 (info->max_indirect_segments && 2262 (rinfo->shadow[i].indirect_grants == NULL))) 2263 goto out_of_memory; 2264 sg_init_table(rinfo->shadow[i].sg, psegs); 2265 } 2266 2267 memalloc_noio_restore(memflags); 2268 2269 return 0; 2270 2271 out_of_memory: 2272 for (i = 0; i < BLK_RING_SIZE(info); i++) { 2273 kvfree(rinfo->shadow[i].grants_used); 2274 rinfo->shadow[i].grants_used = NULL; 2275 kvfree(rinfo->shadow[i].sg); 2276 rinfo->shadow[i].sg = NULL; 2277 kvfree(rinfo->shadow[i].indirect_grants); 2278 rinfo->shadow[i].indirect_grants = NULL; 2279 } 2280 if (!list_empty(&rinfo->indirect_pages)) { 2281 struct page *indirect_page, *n; 2282 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { 2283 list_del(&indirect_page->lru); 2284 __free_page(indirect_page); 2285 } 2286 } 2287 2288 memalloc_noio_restore(memflags); 2289 2290 return -ENOMEM; 2291 } 2292 2293 /* 2294 * Gather all backend feature-* 2295 */ 2296 static void blkfront_gather_backend_features(struct blkfront_info *info) 2297 { 2298 unsigned int indirect_segments; 2299 2300 info->feature_flush = 0; 2301 info->feature_fua = 0; 2302 2303 /* 2304 * If there's no "feature-barrier" defined, then it means 2305 * we're dealing with a very old backend which writes 2306 * synchronously; nothing to do. 2307 * 2308 * If there are barriers, then we use flush. 2309 */ 2310 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) { 2311 info->feature_flush = 1; 2312 info->feature_fua = 1; 2313 } 2314 2315 /* 2316 * And if there is "feature-flush-cache" use that above 2317 * barriers. 
2318 */ 2319 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache", 2320 0)) { 2321 info->feature_flush = 1; 2322 info->feature_fua = 0; 2323 } 2324 2325 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0)) 2326 blkfront_setup_discard(info); 2327 2328 if (info->feature_persistent) 2329 info->feature_persistent = 2330 !!xenbus_read_unsigned(info->xbdev->otherend, 2331 "feature-persistent", 0); 2332 2333 indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, 2334 "feature-max-indirect-segments", 0); 2335 if (indirect_segments > xen_blkif_max_segments) 2336 indirect_segments = xen_blkif_max_segments; 2337 if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) 2338 indirect_segments = 0; 2339 info->max_indirect_segments = indirect_segments; 2340 2341 if (info->feature_persistent) { 2342 mutex_lock(&blkfront_mutex); 2343 schedule_delayed_work(&blkfront_work, HZ * 10); 2344 mutex_unlock(&blkfront_mutex); 2345 } 2346 } 2347 2348 /* 2349 * Invoked when the backend is finally 'ready' (and has told produced 2350 * the details about the physical device - #sectors, size, etc). 2351 */ 2352 static void blkfront_connect(struct blkfront_info *info) 2353 { 2354 unsigned long long sectors; 2355 unsigned long sector_size; 2356 unsigned int physical_sector_size; 2357 unsigned int binfo; 2358 int err, i; 2359 struct blkfront_ring_info *rinfo; 2360 2361 switch (info->connected) { 2362 case BLKIF_STATE_CONNECTED: 2363 /* 2364 * Potentially, the back-end may be signalling 2365 * a capacity change; update the capacity. 2366 */ 2367 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2368 "sectors", "%Lu", §ors); 2369 if (XENBUS_EXIST_ERR(err)) 2370 return; 2371 printk(KERN_INFO "Setting capacity to %Lu\n", 2372 sectors); 2373 set_capacity_revalidate_and_notify(info->gd, sectors, true); 2374 2375 return; 2376 case BLKIF_STATE_SUSPENDED: 2377 /* 2378 * If we are recovering from suspension, we need to wait 2379 * for the backend to announce it's features before 2380 * reconnecting, at least we need to know if the backend 2381 * supports indirect descriptors, and how many. 2382 */ 2383 blkif_recover(info); 2384 return; 2385 2386 default: 2387 break; 2388 } 2389 2390 dev_dbg(&info->xbdev->dev, "%s:%s.\n", 2391 __func__, info->xbdev->otherend); 2392 2393 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2394 "sectors", "%llu", §ors, 2395 "info", "%u", &binfo, 2396 "sector-size", "%lu", §or_size, 2397 NULL); 2398 if (err) { 2399 xenbus_dev_fatal(info->xbdev, err, 2400 "reading backend fields at %s", 2401 info->xbdev->otherend); 2402 return; 2403 } 2404 2405 /* 2406 * physcial-sector-size is a newer field, so old backends may not 2407 * provide this. Assume physical sector size to be the same as 2408 * sector_size in that case. 2409 */ 2410 physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, 2411 "physical-sector-size", 2412 sector_size); 2413 blkfront_gather_backend_features(info); 2414 for_each_rinfo(info, rinfo, i) { 2415 err = blkfront_setup_indirect(rinfo); 2416 if (err) { 2417 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", 2418 info->xbdev->otherend); 2419 blkif_free(info, 0); 2420 break; 2421 } 2422 } 2423 2424 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size, 2425 physical_sector_size); 2426 if (err) { 2427 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", 2428 info->xbdev->otherend); 2429 goto fail; 2430 } 2431 2432 xenbus_switch_state(info->xbdev, XenbusStateConnected); 2433 2434 /* Kick pending requests. 
*/ 2435 info->connected = BLKIF_STATE_CONNECTED; 2436 for_each_rinfo(info, rinfo, i) 2437 kick_pending_request_queues(rinfo); 2438 2439 device_add_disk(&info->xbdev->dev, info->gd, NULL); 2440 2441 info->is_ready = 1; 2442 return; 2443 2444 fail: 2445 blkif_free(info, 0); 2446 return; 2447 } 2448 2449 /** 2450 * Callback received when the backend's state changes. 2451 */ 2452 static void blkback_changed(struct xenbus_device *dev, 2453 enum xenbus_state backend_state) 2454 { 2455 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2456 2457 dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state); 2458 2459 switch (backend_state) { 2460 case XenbusStateInitWait: 2461 if (dev->state != XenbusStateInitialising) 2462 break; 2463 if (talk_to_blkback(dev, info)) 2464 break; 2465 case XenbusStateInitialising: 2466 case XenbusStateInitialised: 2467 case XenbusStateReconfiguring: 2468 case XenbusStateReconfigured: 2469 case XenbusStateUnknown: 2470 break; 2471 2472 case XenbusStateConnected: 2473 /* 2474 * talk_to_blkback sets state to XenbusStateInitialised 2475 * and blkfront_connect sets it to XenbusStateConnected 2476 * (if connection went OK). 2477 * 2478 * If the backend (or toolstack) decides to poke at backend 2479 * state (and re-trigger the watch by setting the state repeatedly 2480 * to XenbusStateConnected (4)) we need to deal with this. 2481 * This is allowed as this is used to communicate to the guest 2482 * that the size of disk has changed! 2483 */ 2484 if ((dev->state != XenbusStateInitialised) && 2485 (dev->state != XenbusStateConnected)) { 2486 if (talk_to_blkback(dev, info)) 2487 break; 2488 } 2489 2490 blkfront_connect(info); 2491 break; 2492 2493 case XenbusStateClosed: 2494 if (dev->state == XenbusStateClosed) 2495 break; 2496 fallthrough; 2497 case XenbusStateClosing: 2498 if (info) 2499 blkfront_closing(info); 2500 break; 2501 } 2502 } 2503 2504 static int blkfront_remove(struct xenbus_device *xbdev) 2505 { 2506 struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); 2507 struct block_device *bdev = NULL; 2508 struct gendisk *disk; 2509 2510 dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); 2511 2512 if (!info) 2513 return 0; 2514 2515 blkif_free(info, 0); 2516 2517 mutex_lock(&info->mutex); 2518 2519 disk = info->gd; 2520 if (disk) 2521 bdev = bdget_disk(disk, 0); 2522 2523 info->xbdev = NULL; 2524 mutex_unlock(&info->mutex); 2525 2526 if (!bdev) { 2527 mutex_lock(&blkfront_mutex); 2528 free_info(info); 2529 mutex_unlock(&blkfront_mutex); 2530 return 0; 2531 } 2532 2533 /* 2534 * The xbdev was removed before we reached the Closed 2535 * state. See if it's safe to remove the disk. If the bdev 2536 * isn't closed yet, we let release take care of it. 
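 *
 * Concretely: if nobody holds the block device open, the gendisk is
 * released and the info structure freed right here. If there are still
 * openers, only info->xbdev is cleared (above) and blkif_release()
 * performs the same cleanup on the last close, via its "sudden device
 * removal" path.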
2537 */ 2538 2539 mutex_lock(&bdev->bd_mutex); 2540 info = disk->private_data; 2541 2542 dev_warn(disk_to_dev(disk), 2543 "%s was hot-unplugged, %d stale handles\n", 2544 xbdev->nodename, bdev->bd_openers); 2545 2546 if (info && !bdev->bd_openers) { 2547 xlvbd_release_gendisk(info); 2548 disk->private_data = NULL; 2549 mutex_lock(&blkfront_mutex); 2550 free_info(info); 2551 mutex_unlock(&blkfront_mutex); 2552 } 2553 2554 mutex_unlock(&bdev->bd_mutex); 2555 bdput(bdev); 2556 2557 return 0; 2558 } 2559 2560 static int blkfront_is_ready(struct xenbus_device *dev) 2561 { 2562 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2563 2564 return info->is_ready && info->xbdev; 2565 } 2566 2567 static int blkif_open(struct block_device *bdev, fmode_t mode) 2568 { 2569 struct gendisk *disk = bdev->bd_disk; 2570 struct blkfront_info *info; 2571 int err = 0; 2572 2573 mutex_lock(&blkfront_mutex); 2574 2575 info = disk->private_data; 2576 if (!info) { 2577 /* xbdev gone */ 2578 err = -ERESTARTSYS; 2579 goto out; 2580 } 2581 2582 mutex_lock(&info->mutex); 2583 2584 if (!info->gd) 2585 /* xbdev is closed */ 2586 err = -ERESTARTSYS; 2587 2588 mutex_unlock(&info->mutex); 2589 2590 out: 2591 mutex_unlock(&blkfront_mutex); 2592 return err; 2593 } 2594 2595 static void blkif_release(struct gendisk *disk, fmode_t mode) 2596 { 2597 struct blkfront_info *info = disk->private_data; 2598 struct block_device *bdev; 2599 struct xenbus_device *xbdev; 2600 2601 mutex_lock(&blkfront_mutex); 2602 2603 bdev = bdget_disk(disk, 0); 2604 2605 if (!bdev) { 2606 WARN(1, "Block device %s yanked out from us!\n", disk->disk_name); 2607 goto out_mutex; 2608 } 2609 if (bdev->bd_openers) 2610 goto out; 2611 2612 /* 2613 * Check if we have been instructed to close. We will have 2614 * deferred this request, because the bdev was still open. 
2615 */ 2616 2617 mutex_lock(&info->mutex); 2618 xbdev = info->xbdev; 2619 2620 if (xbdev && xbdev->state == XenbusStateClosing) { 2621 /* pending switch to state closed */ 2622 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 2623 xlvbd_release_gendisk(info); 2624 xenbus_frontend_closed(info->xbdev); 2625 } 2626 2627 mutex_unlock(&info->mutex); 2628 2629 if (!xbdev) { 2630 /* sudden device removal */ 2631 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 2632 xlvbd_release_gendisk(info); 2633 disk->private_data = NULL; 2634 free_info(info); 2635 } 2636 2637 out: 2638 bdput(bdev); 2639 out_mutex: 2640 mutex_unlock(&blkfront_mutex); 2641 } 2642 2643 static const struct block_device_operations xlvbd_block_fops = 2644 { 2645 .owner = THIS_MODULE, 2646 .open = blkif_open, 2647 .release = blkif_release, 2648 .getgeo = blkif_getgeo, 2649 .ioctl = blkif_ioctl, 2650 .compat_ioctl = blkdev_compat_ptr_ioctl, 2651 }; 2652 2653 2654 static const struct xenbus_device_id blkfront_ids[] = { 2655 { "vbd" }, 2656 { "" } 2657 }; 2658 2659 static struct xenbus_driver blkfront_driver = { 2660 .ids = blkfront_ids, 2661 .probe = blkfront_probe, 2662 .remove = blkfront_remove, 2663 .resume = blkfront_resume, 2664 .otherend_changed = blkback_changed, 2665 .is_ready = blkfront_is_ready, 2666 }; 2667 2668 static void purge_persistent_grants(struct blkfront_info *info) 2669 { 2670 unsigned int i; 2671 unsigned long flags; 2672 struct blkfront_ring_info *rinfo; 2673 2674 for_each_rinfo(info, rinfo, i) { 2675 struct grant *gnt_list_entry, *tmp; 2676 2677 spin_lock_irqsave(&rinfo->ring_lock, flags); 2678 2679 if (rinfo->persistent_gnts_c == 0) { 2680 spin_unlock_irqrestore(&rinfo->ring_lock, flags); 2681 continue; 2682 } 2683 2684 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, 2685 node) { 2686 if (gnt_list_entry->gref == GRANT_INVALID_REF || 2687 gnttab_query_foreign_access(gnt_list_entry->gref)) 2688 continue; 2689 2690 list_del(&gnt_list_entry->node); 2691 gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); 2692 rinfo->persistent_gnts_c--; 2693 gnt_list_entry->gref = GRANT_INVALID_REF; 2694 list_add_tail(&gnt_list_entry->node, &rinfo->grants); 2695 } 2696 2697 spin_unlock_irqrestore(&rinfo->ring_lock, flags); 2698 } 2699 } 2700 2701 static void blkfront_delay_work(struct work_struct *work) 2702 { 2703 struct blkfront_info *info; 2704 bool need_schedule_work = false; 2705 2706 mutex_lock(&blkfront_mutex); 2707 2708 list_for_each_entry(info, &info_list, info_list) { 2709 if (info->feature_persistent) { 2710 need_schedule_work = true; 2711 mutex_lock(&info->mutex); 2712 purge_persistent_grants(info); 2713 mutex_unlock(&info->mutex); 2714 } 2715 } 2716 2717 if (need_schedule_work) 2718 schedule_delayed_work(&blkfront_work, HZ * 10); 2719 2720 mutex_unlock(&blkfront_mutex); 2721 } 2722 2723 static int __init xlblk_init(void) 2724 { 2725 int ret; 2726 int nr_cpus = num_online_cpus(); 2727 2728 if (!xen_domain()) 2729 return -ENODEV; 2730 2731 if (!xen_has_pv_disk_devices()) 2732 return -ENODEV; 2733 2734 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { 2735 pr_warn("xen_blk: can't get major %d with name %s\n", 2736 XENVBD_MAJOR, DEV_NAME); 2737 return -ENODEV; 2738 } 2739 2740 if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) 2741 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; 2742 2743 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) { 2744 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n", 2745 xen_blkif_max_ring_order, 
XENBUS_MAX_RING_GRANT_ORDER); 2746 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER; 2747 } 2748 2749 if (xen_blkif_max_queues > nr_cpus) { 2750 pr_info("Invalid max_queues (%d), will use default max: %d.\n", 2751 xen_blkif_max_queues, nr_cpus); 2752 xen_blkif_max_queues = nr_cpus; 2753 } 2754 2755 INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); 2756 2757 ret = xenbus_register_frontend(&blkfront_driver); 2758 if (ret) { 2759 unregister_blkdev(XENVBD_MAJOR, DEV_NAME); 2760 return ret; 2761 } 2762 2763 return 0; 2764 } 2765 module_init(xlblk_init); 2766 2767 2768 static void __exit xlblk_exit(void) 2769 { 2770 cancel_delayed_work_sync(&blkfront_work); 2771 2772 xenbus_unregister_driver(&blkfront_driver); 2773 unregister_blkdev(XENVBD_MAJOR, DEV_NAME); 2774 kfree(minors); 2775 } 2776 module_exit(xlblk_exit); 2777 2778 MODULE_DESCRIPTION("Xen virtual block device frontend"); 2779 MODULE_LICENSE("GPL"); 2780 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR); 2781 MODULE_ALIAS("xen:vbd"); 2782 MODULE_ALIAS("xenblk"); 2783
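/*
 * Note on the grant purging above: while any device on info_list has
 * feature_persistent set, blkfront_delay_work() keeps rescheduling
 * itself every 10 seconds (HZ * 10) and calls purge_persistent_grants().
 * That walks each ring's grant list and, for entries the backend no
 * longer has mapped (gnttab_query_foreign_access() returns 0), ends the
 * foreign access and resets the gref to GRANT_INVALID_REF, so the entry
 * is granted afresh the next time it is needed instead of pinning a
 * grant indefinitely.
 */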