/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
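 *
 * The 'pending_req' structures form a fixed pool (sized by the 'reqs' module
 * parameter) kept on blkbk->pending_free; alloc_req() and free_req() hand
 * them out and return them under pending_free_lock.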
 */
struct pending_req {
	struct xen_blkif *blkif;
	u64 id;
	int nr_pages;
	atomic_t pendcnt;
	unsigned short operation;
	int status;
	struct list_head free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	struct pending_req *pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head pending_free;
	/* And its spinlock. */
	spinlock_t pending_free_lock;
	wait_queue_head_t pending_free_wq;
	/* The list of all pages that are available. */
	struct page **pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * A small helper macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index into the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve a free pending_req structure from 'pending_reqs' to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
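 *
 * xen_vbd_translate() validates a request against the vbd's size and
 * read-only flag and fills in the physical device and block_device to use;
 * xen_vbd_resize() publishes a changed size to xenstore in a transaction.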
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
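 *
 * The event-channel interrupt handler only flags that work is pending and
 * wakes the per-device xen_blkif_schedule() thread, which does the actual
 * ring processing.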
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
		" | ds %4d\n",
		current->comm, blkif->st_oo_req,
		blkif->st_rd_req, blkif->st_wr_req,
		blkif->st_f_req, blkif->st_ds_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

struct seg_buf {
	unsigned long buf;
	unsigned int nsec;
};

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
				 (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

static int xen_blkbk_map(struct blkif_request *req,
			 struct pending_req *pending_req,
			 struct seg_buf seg[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i;
	int nseg = req->u.rw.nr_segments;
	int ret = 0;

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set
	 * up map[..] with the PFN of the page in our domain and the
	 * corresponding grant reference for each page.
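	 *
	 * On success, seg[i].buf ends up holding the bus address of the
	 * mapped page plus the byte offset of the first sector; only the
	 * in-page offset (seg[i].buf & ~PAGE_MASK) is used later when
	 * building the bios.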
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		flags = GNTMAP_host_map;
		if (pending_req->operation != BLKIF_OP_READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref,
				  pending_req->blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), NULL);
		if (ret) {
			pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
				 (unsigned long)map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}

static int dispatch_discard_io(struct xen_blkif *blkif,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;

	blkif->st_ds_req++;

	xen_blkif_get(blkif);
	if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
	    blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
		unsigned long secure = (blkif->vbd.discard_secure &&
			(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
			BLKDEV_DISCARD_SECURE : 0;
		err = blkdev_issue_discard(bdev,
				req->u.discard.sector_number,
				req->u.discard.nr_sectors,
				GFP_KERNEL, secure);
	} else
		err = -EOPNOTSUPP;

	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/* The initial value is one, and one refcnt taken at the
		 * start of the xen_blkif_schedule thread. */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bh->b_end_io()
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/*
	 * An error fails the entire request.
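	 *
	 * A -EOPNOTSUPP from a flush or barrier is handled specially below:
	 * the corresponding feature is withdrawn via
	 * xen_blkbk_flush_diskcache() or xen_blkbk_barrier(), and
	 * BLKIF_RSP_EOPNOTSUPP is returned so the frontend can stop issuing
	 * that operation.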
	 */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		   (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}


/*
 * Copy the 'struct blkif_request' from the ring buffer (which has the
 * sectors we want, number of them, grant references, etc), and transmute
 * it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();
		if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
			free_req(pending_req);
			if (dispatch_discard_io(blkif, &req))
				break;
		} else if (dispatch_rw_block_io(blkif, &req, pending_req))
			break;

		/*
		 * Yield point for this unbounded loop.
		 */
		cond_resched();
	}

	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.dev = req->u.rw.handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects = 0;

	pending_req->blkif = blkif;
	pending_req->id = req->u.rw.id;
	pending_req->operation = req->operation;
	pending_req->status = BLKIF_RSP_OKAY;
	pending_req->nr_pages = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
				 blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If the mapping below fails, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
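	 *
	 * xen_blkbk_map() returns non-zero if any grant failed to map; the
	 * fail_flush path then unmaps whatever was mapped and responds to
	 * the frontend with BLKIF_RSP_ERROR.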
	 */
	if (xen_blkbk_map(req, pending_req, seg))
		goto fail_flush;

	/*
	 * The corresponding xen_blkif_put is done in __end_block_io_op, once
	 * every bio submitted for this request (including the empty flush
	 * bio allocated below when "!bio") has completed.
	 */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io = end_block_io_op;
			bio->bi_sector = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io = end_block_io_op;
	}

	/*
	 * Set the pending count up front so that the individual submit_bio
	 * calls do not have to call atomic_inc.
	 */
	atomic_set(&pending_req->pendcnt, nbio);

	/* Get a reference count for the disk queue and start sending I/O */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}


/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id = id;
	resp.operation = op;
	resp.status = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/*
	 * Place on the response ring for the relevant domain.
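	 *
	 * All three protocol variants share the same producer index through
	 * blk_rings->common; only the response layout differs per guest ABI.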
	 */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
				      xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
					       mmap_pages, GFP_KERNEL);
	blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
				       mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
		kfree(blkbk->pending_pages);
	}
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");