/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static DEFINE_MUTEX(dm_mutex);
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
157 */ 158 mempool_t *io_pool; 159 mempool_t *tio_pool; 160 161 struct bio_set *bs; 162 163 /* 164 * Event handling. 165 */ 166 atomic_t event_nr; 167 wait_queue_head_t eventq; 168 atomic_t uevent_seq; 169 struct list_head uevent_list; 170 spinlock_t uevent_lock; /* Protect access to uevent_list */ 171 172 /* 173 * freeze/thaw support require holding onto a super block 174 */ 175 struct super_block *frozen_sb; 176 struct block_device *bdev; 177 178 /* forced geometry settings */ 179 struct hd_geometry geometry; 180 181 /* For saving the address of __make_request for request based dm */ 182 make_request_fn *saved_make_request_fn; 183 184 /* sysfs handle */ 185 struct kobject kobj; 186 187 /* zero-length flush that will be cloned and submitted to targets */ 188 struct bio flush_bio; 189 }; 190 191 /* 192 * For mempools pre-allocation at the table loading time. 193 */ 194 struct dm_md_mempools { 195 mempool_t *io_pool; 196 mempool_t *tio_pool; 197 struct bio_set *bs; 198 }; 199 200 #define MIN_IOS 256 201 static struct kmem_cache *_io_cache; 202 static struct kmem_cache *_tio_cache; 203 static struct kmem_cache *_rq_tio_cache; 204 static struct kmem_cache *_rq_bio_info_cache; 205 206 static int __init local_init(void) 207 { 208 int r = -ENOMEM; 209 210 /* allocate a slab for the dm_ios */ 211 _io_cache = KMEM_CACHE(dm_io, 0); 212 if (!_io_cache) 213 return r; 214 215 /* allocate a slab for the target ios */ 216 _tio_cache = KMEM_CACHE(dm_target_io, 0); 217 if (!_tio_cache) 218 goto out_free_io_cache; 219 220 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 221 if (!_rq_tio_cache) 222 goto out_free_tio_cache; 223 224 _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); 225 if (!_rq_bio_info_cache) 226 goto out_free_rq_tio_cache; 227 228 r = dm_uevent_init(); 229 if (r) 230 goto out_free_rq_bio_info_cache; 231 232 _major = major; 233 r = register_blkdev(_major, _name); 234 if (r < 0) 235 goto out_uevent_exit; 236 237 if (!_major) 238 _major = r; 239 240 return 0; 241 242 out_uevent_exit: 243 dm_uevent_exit(); 244 out_free_rq_bio_info_cache: 245 kmem_cache_destroy(_rq_bio_info_cache); 246 out_free_rq_tio_cache: 247 kmem_cache_destroy(_rq_tio_cache); 248 out_free_tio_cache: 249 kmem_cache_destroy(_tio_cache); 250 out_free_io_cache: 251 kmem_cache_destroy(_io_cache); 252 253 return r; 254 } 255 256 static void local_exit(void) 257 { 258 kmem_cache_destroy(_rq_bio_info_cache); 259 kmem_cache_destroy(_rq_tio_cache); 260 kmem_cache_destroy(_tio_cache); 261 kmem_cache_destroy(_io_cache); 262 unregister_blkdev(_major, _name); 263 dm_uevent_exit(); 264 265 _major = 0; 266 267 DMINFO("cleaned up"); 268 } 269 270 static int (*_inits[])(void) __initdata = { 271 local_init, 272 dm_target_init, 273 dm_linear_init, 274 dm_stripe_init, 275 dm_io_init, 276 dm_kcopyd_init, 277 dm_interface_init, 278 }; 279 280 static void (*_exits[])(void) = { 281 local_exit, 282 dm_target_exit, 283 dm_linear_exit, 284 dm_stripe_exit, 285 dm_io_exit, 286 dm_kcopyd_exit, 287 dm_interface_exit, 288 }; 289 290 static int __init dm_init(void) 291 { 292 const int count = ARRAY_SIZE(_inits); 293 294 int r, i; 295 296 for (i = 0; i < count; i++) { 297 r = _inits[i](); 298 if (r) 299 goto bad; 300 } 301 302 return 0; 303 304 bad: 305 while (i--) 306 _exits[i](); 307 308 return r; 309 } 310 311 static void __exit dm_exit(void) 312 { 313 int i = ARRAY_SIZE(_exits); 314 315 while (i--) 316 _exits[i](); 317 } 318 319 /* 320 * Block device functions 321 */ 322 int dm_deleting_md(struct mapped_device *md) 323 { 324 return 
test_bit(DMF_DELETING, &md->flags); 325 } 326 327 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 328 { 329 struct mapped_device *md; 330 331 mutex_lock(&dm_mutex); 332 spin_lock(&_minor_lock); 333 334 md = bdev->bd_disk->private_data; 335 if (!md) 336 goto out; 337 338 if (test_bit(DMF_FREEING, &md->flags) || 339 dm_deleting_md(md)) { 340 md = NULL; 341 goto out; 342 } 343 344 dm_get(md); 345 atomic_inc(&md->open_count); 346 347 out: 348 spin_unlock(&_minor_lock); 349 mutex_unlock(&dm_mutex); 350 351 return md ? 0 : -ENXIO; 352 } 353 354 static int dm_blk_close(struct gendisk *disk, fmode_t mode) 355 { 356 struct mapped_device *md = disk->private_data; 357 358 mutex_lock(&dm_mutex); 359 atomic_dec(&md->open_count); 360 dm_put(md); 361 mutex_unlock(&dm_mutex); 362 363 return 0; 364 } 365 366 int dm_open_count(struct mapped_device *md) 367 { 368 return atomic_read(&md->open_count); 369 } 370 371 /* 372 * Guarantees nothing is using the device before it's deleted. 373 */ 374 int dm_lock_for_deletion(struct mapped_device *md) 375 { 376 int r = 0; 377 378 spin_lock(&_minor_lock); 379 380 if (dm_open_count(md)) 381 r = -EBUSY; 382 else 383 set_bit(DMF_DELETING, &md->flags); 384 385 spin_unlock(&_minor_lock); 386 387 return r; 388 } 389 390 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 391 { 392 struct mapped_device *md = bdev->bd_disk->private_data; 393 394 return dm_get_geometry(md, geo); 395 } 396 397 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 398 unsigned int cmd, unsigned long arg) 399 { 400 struct mapped_device *md = bdev->bd_disk->private_data; 401 struct dm_table *map = dm_get_live_table(md); 402 struct dm_target *tgt; 403 int r = -ENOTTY; 404 405 if (!map || !dm_table_get_size(map)) 406 goto out; 407 408 /* We only support devices that have a single target */ 409 if (dm_table_get_num_targets(map) != 1) 410 goto out; 411 412 tgt = dm_table_get_target(map, 0); 413 414 if (dm_suspended_md(md)) { 415 r = -EAGAIN; 416 goto out; 417 } 418 419 if (tgt->type->ioctl) 420 r = tgt->type->ioctl(tgt, cmd, arg); 421 422 out: 423 dm_table_put(map); 424 425 return r; 426 } 427 428 static struct dm_io *alloc_io(struct mapped_device *md) 429 { 430 return mempool_alloc(md->io_pool, GFP_NOIO); 431 } 432 433 static void free_io(struct mapped_device *md, struct dm_io *io) 434 { 435 mempool_free(io, md->io_pool); 436 } 437 438 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) 439 { 440 mempool_free(tio, md->tio_pool); 441 } 442 443 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, 444 gfp_t gfp_mask) 445 { 446 return mempool_alloc(md->tio_pool, gfp_mask); 447 } 448 449 static void free_rq_tio(struct dm_rq_target_io *tio) 450 { 451 mempool_free(tio, tio->md->tio_pool); 452 } 453 454 static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md) 455 { 456 return mempool_alloc(md->io_pool, GFP_ATOMIC); 457 } 458 459 static void free_bio_info(struct dm_rq_clone_bio_info *info) 460 { 461 mempool_free(info, info->tio->md->io_pool); 462 } 463 464 static int md_in_flight(struct mapped_device *md) 465 { 466 return atomic_read(&md->pending[READ]) + 467 atomic_read(&md->pending[WRITE]); 468 } 469 470 static void start_io_acct(struct dm_io *io) 471 { 472 struct mapped_device *md = io->md; 473 int cpu; 474 int rw = bio_data_dir(io->bio); 475 476 io->start_time = jiffies; 477 478 cpu = part_stat_lock(); 479 part_round_stats(cpu, &dm_disk(md)->part0); 480 part_stat_unlock(); 481 
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
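			 * If a noflush suspend is in progress, the bio is put
			 * back on md->deferred so it can be reissued on resume;
			 * otherwise the pushback is turned into -EIO below.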
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio);
			bio_endio(bio, io_error);
		}
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
731 */ 732 static void rq_completed(struct mapped_device *md, int rw, int run_queue) 733 { 734 atomic_dec(&md->pending[rw]); 735 736 /* nudge anyone waiting on suspend queue */ 737 if (!md_in_flight(md)) 738 wake_up(&md->wait); 739 740 if (run_queue) 741 blk_run_queue(md->queue); 742 743 /* 744 * dm_put() must be at the end of this function. See the comment above 745 */ 746 dm_put(md); 747 } 748 749 static void free_rq_clone(struct request *clone) 750 { 751 struct dm_rq_target_io *tio = clone->end_io_data; 752 753 blk_rq_unprep_clone(clone); 754 free_rq_tio(tio); 755 } 756 757 /* 758 * Complete the clone and the original request. 759 * Must be called without queue lock. 760 */ 761 static void dm_end_request(struct request *clone, int error) 762 { 763 int rw = rq_data_dir(clone); 764 struct dm_rq_target_io *tio = clone->end_io_data; 765 struct mapped_device *md = tio->md; 766 struct request *rq = tio->orig; 767 768 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 769 rq->errors = clone->errors; 770 rq->resid_len = clone->resid_len; 771 772 if (rq->sense) 773 /* 774 * We are using the sense buffer of the original 775 * request. 776 * So setting the length of the sense data is enough. 777 */ 778 rq->sense_len = clone->sense_len; 779 } 780 781 free_rq_clone(clone); 782 blk_end_request_all(rq, error); 783 rq_completed(md, rw, true); 784 } 785 786 static void dm_unprep_request(struct request *rq) 787 { 788 struct request *clone = rq->special; 789 790 rq->special = NULL; 791 rq->cmd_flags &= ~REQ_DONTPREP; 792 793 free_rq_clone(clone); 794 } 795 796 /* 797 * Requeue the original request of a clone. 798 */ 799 void dm_requeue_unmapped_request(struct request *clone) 800 { 801 int rw = rq_data_dir(clone); 802 struct dm_rq_target_io *tio = clone->end_io_data; 803 struct mapped_device *md = tio->md; 804 struct request *rq = tio->orig; 805 struct request_queue *q = rq->q; 806 unsigned long flags; 807 808 dm_unprep_request(rq); 809 810 spin_lock_irqsave(q->queue_lock, flags); 811 if (elv_queue_empty(q)) 812 blk_plug_device(q); 813 blk_requeue_request(q, rq); 814 spin_unlock_irqrestore(q->queue_lock, flags); 815 816 rq_completed(md, rw, 0); 817 } 818 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); 819 820 static void __stop_queue(struct request_queue *q) 821 { 822 blk_stop_queue(q); 823 } 824 825 static void stop_queue(struct request_queue *q) 826 { 827 unsigned long flags; 828 829 spin_lock_irqsave(q->queue_lock, flags); 830 __stop_queue(q); 831 spin_unlock_irqrestore(q->queue_lock, flags); 832 } 833 834 static void __start_queue(struct request_queue *q) 835 { 836 if (blk_queue_stopped(q)) 837 blk_start_queue(q); 838 } 839 840 static void start_queue(struct request_queue *q) 841 { 842 unsigned long flags; 843 844 spin_lock_irqsave(q->queue_lock, flags); 845 __start_queue(q); 846 spin_unlock_irqrestore(q->queue_lock, flags); 847 } 848 849 static void dm_done(struct request *clone, int error, bool mapped) 850 { 851 int r = error; 852 struct dm_rq_target_io *tio = clone->end_io_data; 853 dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; 854 855 if (mapped && rq_end_io) 856 r = rq_end_io(tio->ti, clone, error, &tio->info); 857 858 if (r <= 0) 859 /* The target wants to complete the I/O */ 860 dm_end_request(clone, r); 861 else if (r == DM_ENDIO_INCOMPLETE) 862 /* The target will handle the I/O */ 863 return; 864 else if (r == DM_ENDIO_REQUEUE) 865 /* The target wants to requeue the I/O */ 866 dm_requeue_unmapped_request(clone); 867 else { 868 DMWARN("unimplemented target endio return value: %d", r); 869 
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * Only clean up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* actually freed here because it was allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 * - another request may be submitted by the upper level driver
	 *   of the stacking during the completion
	 * - the submission which requires queue lock may be done
	 *   against this queue
	 */
	dm_complete_request(clone, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		sector_t offset = dm_target_offset(ti, sector);
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
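	 * DM_MAPIO_REMAPPED means the clone should be dispatched with
	 * generic_make_request(); DM_MAPIO_REQUEUE or a negative value is
	 * handed to dec_pending() and the clone is freed.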
986 */ 987 atomic_inc(&tio->io->io_count); 988 sector = clone->bi_sector; 989 r = ti->type->map(ti, clone, &tio->info); 990 if (r == DM_MAPIO_REMAPPED) { 991 /* the bio has been remapped so dispatch it */ 992 993 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone, 994 tio->io->bio->bi_bdev->bd_dev, sector); 995 996 generic_make_request(clone); 997 } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 998 /* error the io and bail out, or requeue it if needed */ 999 md = tio->io->md; 1000 dec_pending(tio->io, r); 1001 /* 1002 * Store bio_set for cleanup. 1003 */ 1004 clone->bi_private = md->bs; 1005 bio_put(clone); 1006 free_tio(md, tio); 1007 } else if (r) { 1008 DMWARN("unimplemented target map return value: %d", r); 1009 BUG(); 1010 } 1011 } 1012 1013 struct clone_info { 1014 struct mapped_device *md; 1015 struct dm_table *map; 1016 struct bio *bio; 1017 struct dm_io *io; 1018 sector_t sector; 1019 sector_t sector_count; 1020 unsigned short idx; 1021 }; 1022 1023 static void dm_bio_destructor(struct bio *bio) 1024 { 1025 struct bio_set *bs = bio->bi_private; 1026 1027 bio_free(bio, bs); 1028 } 1029 1030 /* 1031 * Creates a little bio that just does part of a bvec. 1032 */ 1033 static struct bio *split_bvec(struct bio *bio, sector_t sector, 1034 unsigned short idx, unsigned int offset, 1035 unsigned int len, struct bio_set *bs) 1036 { 1037 struct bio *clone; 1038 struct bio_vec *bv = bio->bi_io_vec + idx; 1039 1040 clone = bio_alloc_bioset(GFP_NOIO, 1, bs); 1041 clone->bi_destructor = dm_bio_destructor; 1042 *clone->bi_io_vec = *bv; 1043 1044 clone->bi_sector = sector; 1045 clone->bi_bdev = bio->bi_bdev; 1046 clone->bi_rw = bio->bi_rw; 1047 clone->bi_vcnt = 1; 1048 clone->bi_size = to_bytes(len); 1049 clone->bi_io_vec->bv_offset = offset; 1050 clone->bi_io_vec->bv_len = clone->bi_size; 1051 clone->bi_flags |= 1 << BIO_CLONED; 1052 1053 if (bio_integrity(bio)) { 1054 bio_integrity_clone(clone, bio, GFP_NOIO, bs); 1055 bio_integrity_trim(clone, 1056 bio_sector_offset(bio, idx, offset), len); 1057 } 1058 1059 return clone; 1060 } 1061 1062 /* 1063 * Creates a bio that consists of range of complete bvecs. 
1064 */ 1065 static struct bio *clone_bio(struct bio *bio, sector_t sector, 1066 unsigned short idx, unsigned short bv_count, 1067 unsigned int len, struct bio_set *bs) 1068 { 1069 struct bio *clone; 1070 1071 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); 1072 __bio_clone(clone, bio); 1073 clone->bi_destructor = dm_bio_destructor; 1074 clone->bi_sector = sector; 1075 clone->bi_idx = idx; 1076 clone->bi_vcnt = idx + bv_count; 1077 clone->bi_size = to_bytes(len); 1078 clone->bi_flags &= ~(1 << BIO_SEG_VALID); 1079 1080 if (bio_integrity(bio)) { 1081 bio_integrity_clone(clone, bio, GFP_NOIO, bs); 1082 1083 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1084 bio_integrity_trim(clone, 1085 bio_sector_offset(bio, idx, 0), len); 1086 } 1087 1088 return clone; 1089 } 1090 1091 static struct dm_target_io *alloc_tio(struct clone_info *ci, 1092 struct dm_target *ti) 1093 { 1094 struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO); 1095 1096 tio->io = ci->io; 1097 tio->ti = ti; 1098 memset(&tio->info, 0, sizeof(tio->info)); 1099 1100 return tio; 1101 } 1102 1103 static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, 1104 unsigned request_nr, sector_t len) 1105 { 1106 struct dm_target_io *tio = alloc_tio(ci, ti); 1107 struct bio *clone; 1108 1109 tio->info.target_request_nr = request_nr; 1110 1111 /* 1112 * Discard requests require the bio's inline iovecs be initialized. 1113 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush 1114 * and discard, so no need for concern about wasted bvec allocations. 1115 */ 1116 clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs); 1117 __bio_clone(clone, ci->bio); 1118 clone->bi_destructor = dm_bio_destructor; 1119 if (len) { 1120 clone->bi_sector = ci->sector; 1121 clone->bi_size = to_bytes(len); 1122 } 1123 1124 __map_bio(ti, clone, tio); 1125 } 1126 1127 static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, 1128 unsigned num_requests, sector_t len) 1129 { 1130 unsigned request_nr; 1131 1132 for (request_nr = 0; request_nr < num_requests; request_nr++) 1133 __issue_target_request(ci, ti, request_nr, len); 1134 } 1135 1136 static int __clone_and_map_empty_flush(struct clone_info *ci) 1137 { 1138 unsigned target_nr = 0; 1139 struct dm_target *ti; 1140 1141 BUG_ON(bio_has_data(ci->bio)); 1142 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1143 __issue_target_requests(ci, ti, ti->num_flush_requests, 0); 1144 1145 return 0; 1146 } 1147 1148 /* 1149 * Perform all io with a single clone. 1150 */ 1151 static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) 1152 { 1153 struct bio *clone, *bio = ci->bio; 1154 struct dm_target_io *tio; 1155 1156 tio = alloc_tio(ci, ti); 1157 clone = clone_bio(bio, ci->sector, ci->idx, 1158 bio->bi_vcnt - ci->idx, ci->sector_count, 1159 ci->md->bs); 1160 __map_bio(ti, clone, tio); 1161 ci->sector_count = 0; 1162 } 1163 1164 static int __clone_and_map_discard(struct clone_info *ci) 1165 { 1166 struct dm_target *ti; 1167 sector_t len; 1168 1169 do { 1170 ti = dm_table_find_target(ci->map, ci->sector); 1171 if (!dm_target_is_valid(ti)) 1172 return -EIO; 1173 1174 /* 1175 * Even though the device advertised discard support, 1176 * reconfiguration might have changed that since the 1177 * check was performed. 
1178 */ 1179 if (!ti->num_discard_requests) 1180 return -EOPNOTSUPP; 1181 1182 len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1183 1184 __issue_target_requests(ci, ti, ti->num_discard_requests, len); 1185 1186 ci->sector += len; 1187 } while (ci->sector_count -= len); 1188 1189 return 0; 1190 } 1191 1192 static int __clone_and_map(struct clone_info *ci) 1193 { 1194 struct bio *clone, *bio = ci->bio; 1195 struct dm_target *ti; 1196 sector_t len = 0, max; 1197 struct dm_target_io *tio; 1198 1199 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1200 return __clone_and_map_discard(ci); 1201 1202 ti = dm_table_find_target(ci->map, ci->sector); 1203 if (!dm_target_is_valid(ti)) 1204 return -EIO; 1205 1206 max = max_io_len(ci->sector, ti); 1207 1208 if (ci->sector_count <= max) { 1209 /* 1210 * Optimise for the simple case where we can do all of 1211 * the remaining io with a single clone. 1212 */ 1213 __clone_and_map_simple(ci, ti); 1214 1215 } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { 1216 /* 1217 * There are some bvecs that don't span targets. 1218 * Do as many of these as possible. 1219 */ 1220 int i; 1221 sector_t remaining = max; 1222 sector_t bv_len; 1223 1224 for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) { 1225 bv_len = to_sector(bio->bi_io_vec[i].bv_len); 1226 1227 if (bv_len > remaining) 1228 break; 1229 1230 remaining -= bv_len; 1231 len += bv_len; 1232 } 1233 1234 tio = alloc_tio(ci, ti); 1235 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, 1236 ci->md->bs); 1237 __map_bio(ti, clone, tio); 1238 1239 ci->sector += len; 1240 ci->sector_count -= len; 1241 ci->idx = i; 1242 1243 } else { 1244 /* 1245 * Handle a bvec that must be split between two or more targets. 1246 */ 1247 struct bio_vec *bv = bio->bi_io_vec + ci->idx; 1248 sector_t remaining = to_sector(bv->bv_len); 1249 unsigned int offset = 0; 1250 1251 do { 1252 if (offset) { 1253 ti = dm_table_find_target(ci->map, ci->sector); 1254 if (!dm_target_is_valid(ti)) 1255 return -EIO; 1256 1257 max = max_io_len(ci->sector, ti); 1258 } 1259 1260 len = min(remaining, max); 1261 1262 tio = alloc_tio(ci, ti); 1263 clone = split_bvec(bio, ci->sector, ci->idx, 1264 bv->bv_offset + offset, len, 1265 ci->md->bs); 1266 1267 __map_bio(ti, clone, tio); 1268 1269 ci->sector += len; 1270 ci->sector_count -= len; 1271 offset += to_bytes(len); 1272 } while (remaining -= len); 1273 1274 ci->idx++; 1275 } 1276 1277 return 0; 1278 } 1279 1280 /* 1281 * Split the bio into several clones and submit it to targets. 
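 * For a flush (REQ_FLUSH), an empty clone of md->flush_bio is first sent to
 * every target; any data portion is reissued later from dec_pending()
 * without REQ_FLUSH.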
1282 */ 1283 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) 1284 { 1285 struct clone_info ci; 1286 int error = 0; 1287 1288 ci.map = dm_get_live_table(md); 1289 if (unlikely(!ci.map)) { 1290 bio_io_error(bio); 1291 return; 1292 } 1293 1294 ci.md = md; 1295 ci.io = alloc_io(md); 1296 ci.io->error = 0; 1297 atomic_set(&ci.io->io_count, 1); 1298 ci.io->bio = bio; 1299 ci.io->md = md; 1300 spin_lock_init(&ci.io->endio_lock); 1301 ci.sector = bio->bi_sector; 1302 ci.idx = bio->bi_idx; 1303 1304 start_io_acct(ci.io); 1305 if (bio->bi_rw & REQ_FLUSH) { 1306 ci.bio = &ci.md->flush_bio; 1307 ci.sector_count = 0; 1308 error = __clone_and_map_empty_flush(&ci); 1309 /* dec_pending submits any data associated with flush */ 1310 } else { 1311 ci.bio = bio; 1312 ci.sector_count = bio_sectors(bio); 1313 while (ci.sector_count && !error) 1314 error = __clone_and_map(&ci); 1315 } 1316 1317 /* drop the extra reference count */ 1318 dec_pending(ci.io, error); 1319 dm_table_put(ci.map); 1320 } 1321 /*----------------------------------------------------------------- 1322 * CRUD END 1323 *---------------------------------------------------------------*/ 1324 1325 static int dm_merge_bvec(struct request_queue *q, 1326 struct bvec_merge_data *bvm, 1327 struct bio_vec *biovec) 1328 { 1329 struct mapped_device *md = q->queuedata; 1330 struct dm_table *map = dm_get_live_table(md); 1331 struct dm_target *ti; 1332 sector_t max_sectors; 1333 int max_size = 0; 1334 1335 if (unlikely(!map)) 1336 goto out; 1337 1338 ti = dm_table_find_target(map, bvm->bi_sector); 1339 if (!dm_target_is_valid(ti)) 1340 goto out_table; 1341 1342 /* 1343 * Find maximum amount of I/O that won't need splitting 1344 */ 1345 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1346 (sector_t) BIO_MAX_SECTORS); 1347 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1348 if (max_size < 0) 1349 max_size = 0; 1350 1351 /* 1352 * merge_bvec_fn() returns number of bytes 1353 * it can accept at this offset 1354 * max is precomputed maximal io size 1355 */ 1356 if (max_size && ti->type->merge) 1357 max_size = ti->type->merge(ti, bvm, biovec, max_size); 1358 /* 1359 * If the target doesn't support merge method and some of the devices 1360 * provided their merge_bvec method (we know this by looking at 1361 * queue_max_hw_sectors), then we can't allow bios with multiple vector 1362 * entries. So always set max_size to 0, and the code below allows 1363 * just one page. 1364 */ 1365 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 1366 1367 max_size = 0; 1368 1369 out_table: 1370 dm_table_put(map); 1371 1372 out: 1373 /* 1374 * Always allow an entire first page 1375 */ 1376 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1377 max_size = biovec->bv_len; 1378 1379 return max_size; 1380 } 1381 1382 /* 1383 * The request function that just remaps the bio built up by 1384 * dm_merge_bvec. 
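 * While the device is suspended (DMF_BLOCK_IO_FOR_SUSPEND) bios are queued
 * on md->deferred via queue_io(), except readahead bios which are simply
 * failed.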
1385 */ 1386 static int _dm_request(struct request_queue *q, struct bio *bio) 1387 { 1388 int rw = bio_data_dir(bio); 1389 struct mapped_device *md = q->queuedata; 1390 int cpu; 1391 1392 down_read(&md->io_lock); 1393 1394 cpu = part_stat_lock(); 1395 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); 1396 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); 1397 part_stat_unlock(); 1398 1399 /* if we're suspended, we have to queue this io for later */ 1400 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1401 up_read(&md->io_lock); 1402 1403 if (bio_rw(bio) != READA) 1404 queue_io(md, bio); 1405 else 1406 bio_io_error(bio); 1407 return 0; 1408 } 1409 1410 __split_and_process_bio(md, bio); 1411 up_read(&md->io_lock); 1412 return 0; 1413 } 1414 1415 static int dm_make_request(struct request_queue *q, struct bio *bio) 1416 { 1417 struct mapped_device *md = q->queuedata; 1418 1419 return md->saved_make_request_fn(q, bio); /* call __make_request() */ 1420 } 1421 1422 static int dm_request_based(struct mapped_device *md) 1423 { 1424 return blk_queue_stackable(md->queue); 1425 } 1426 1427 static int dm_request(struct request_queue *q, struct bio *bio) 1428 { 1429 struct mapped_device *md = q->queuedata; 1430 1431 if (dm_request_based(md)) 1432 return dm_make_request(q, bio); 1433 1434 return _dm_request(q, bio); 1435 } 1436 1437 void dm_dispatch_request(struct request *rq) 1438 { 1439 int r; 1440 1441 if (blk_queue_io_stat(rq->q)) 1442 rq->cmd_flags |= REQ_IO_STAT; 1443 1444 rq->start_time = jiffies; 1445 r = blk_insert_cloned_request(rq->q, rq); 1446 if (r) 1447 dm_complete_request(rq, r); 1448 } 1449 EXPORT_SYMBOL_GPL(dm_dispatch_request); 1450 1451 static void dm_rq_bio_destructor(struct bio *bio) 1452 { 1453 struct dm_rq_clone_bio_info *info = bio->bi_private; 1454 struct mapped_device *md = info->tio->md; 1455 1456 free_bio_info(info); 1457 bio_free(bio, md->bs); 1458 } 1459 1460 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1461 void *data) 1462 { 1463 struct dm_rq_target_io *tio = data; 1464 struct mapped_device *md = tio->md; 1465 struct dm_rq_clone_bio_info *info = alloc_bio_info(md); 1466 1467 if (!info) 1468 return -ENOMEM; 1469 1470 info->orig = bio_orig; 1471 info->tio = tio; 1472 bio->bi_end_io = end_clone_bio; 1473 bio->bi_private = info; 1474 bio->bi_destructor = dm_rq_bio_destructor; 1475 1476 return 0; 1477 } 1478 1479 static int setup_clone(struct request *clone, struct request *rq, 1480 struct dm_rq_target_io *tio) 1481 { 1482 int r; 1483 1484 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, 1485 dm_rq_bio_constructor, tio); 1486 if (r) 1487 return r; 1488 1489 clone->cmd = rq->cmd; 1490 clone->cmd_len = rq->cmd_len; 1491 clone->sense = rq->sense; 1492 clone->buffer = rq->buffer; 1493 clone->end_io = end_clone_request; 1494 clone->end_io_data = tio; 1495 1496 return 0; 1497 } 1498 1499 static struct request *clone_rq(struct request *rq, struct mapped_device *md, 1500 gfp_t gfp_mask) 1501 { 1502 struct request *clone; 1503 struct dm_rq_target_io *tio; 1504 1505 tio = alloc_rq_tio(md, gfp_mask); 1506 if (!tio) 1507 return NULL; 1508 1509 tio->md = md; 1510 tio->ti = NULL; 1511 tio->orig = rq; 1512 tio->error = 0; 1513 memset(&tio->info, 0, sizeof(tio->info)); 1514 1515 clone = &tio->clone; 1516 if (setup_clone(clone, rq, tio)) { 1517 /* -ENOMEM */ 1518 free_rq_tio(tio); 1519 return NULL; 1520 } 1521 1522 return clone; 1523 } 1524 1525 /* 1526 * Called with the queue lock held. 
1527 */ 1528 static int dm_prep_fn(struct request_queue *q, struct request *rq) 1529 { 1530 struct mapped_device *md = q->queuedata; 1531 struct request *clone; 1532 1533 if (unlikely(rq->special)) { 1534 DMWARN("Already has something in rq->special."); 1535 return BLKPREP_KILL; 1536 } 1537 1538 clone = clone_rq(rq, md, GFP_ATOMIC); 1539 if (!clone) 1540 return BLKPREP_DEFER; 1541 1542 rq->special = clone; 1543 rq->cmd_flags |= REQ_DONTPREP; 1544 1545 return BLKPREP_OK; 1546 } 1547 1548 /* 1549 * Returns: 1550 * 0 : the request has been processed (not requeued) 1551 * !0 : the request has been requeued 1552 */ 1553 static int map_request(struct dm_target *ti, struct request *clone, 1554 struct mapped_device *md) 1555 { 1556 int r, requeued = 0; 1557 struct dm_rq_target_io *tio = clone->end_io_data; 1558 1559 /* 1560 * Hold the md reference here for the in-flight I/O. 1561 * We can't rely on the reference count by device opener, 1562 * because the device may be closed during the request completion 1563 * when all bios are completed. 1564 * See the comment in rq_completed() too. 1565 */ 1566 dm_get(md); 1567 1568 tio->ti = ti; 1569 r = ti->type->map_rq(ti, clone, &tio->info); 1570 switch (r) { 1571 case DM_MAPIO_SUBMITTED: 1572 /* The target has taken the I/O to submit by itself later */ 1573 break; 1574 case DM_MAPIO_REMAPPED: 1575 /* The target has remapped the I/O so dispatch it */ 1576 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1577 blk_rq_pos(tio->orig)); 1578 dm_dispatch_request(clone); 1579 break; 1580 case DM_MAPIO_REQUEUE: 1581 /* The target wants to requeue the I/O */ 1582 dm_requeue_unmapped_request(clone); 1583 requeued = 1; 1584 break; 1585 default: 1586 if (r > 0) { 1587 DMWARN("unimplemented target map return value: %d", r); 1588 BUG(); 1589 } 1590 1591 /* The target wants to complete the I/O */ 1592 dm_kill_unmapped_request(clone, r); 1593 break; 1594 } 1595 1596 return requeued; 1597 } 1598 1599 /* 1600 * q->request_fn for request-based dm. 1601 * Called with the queue lock held. 1602 */ 1603 static void dm_request_fn(struct request_queue *q) 1604 { 1605 struct mapped_device *md = q->queuedata; 1606 struct dm_table *map = dm_get_live_table(md); 1607 struct dm_target *ti; 1608 struct request *rq, *clone; 1609 sector_t pos; 1610 1611 /* 1612 * For suspend, check blk_queue_stopped() and increment 1613 * ->pending within a single queue_lock not to increment the 1614 * number of in-flight I/Os after the queue is stopped in 1615 * dm_suspend(). 
1616 */ 1617 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { 1618 rq = blk_peek_request(q); 1619 if (!rq) 1620 goto plug_and_out; 1621 1622 /* always use block 0 to find the target for flushes for now */ 1623 pos = 0; 1624 if (!(rq->cmd_flags & REQ_FLUSH)) 1625 pos = blk_rq_pos(rq); 1626 1627 ti = dm_table_find_target(map, pos); 1628 BUG_ON(!dm_target_is_valid(ti)); 1629 1630 if (ti->type->busy && ti->type->busy(ti)) 1631 goto plug_and_out; 1632 1633 blk_start_request(rq); 1634 clone = rq->special; 1635 atomic_inc(&md->pending[rq_data_dir(clone)]); 1636 1637 spin_unlock(q->queue_lock); 1638 if (map_request(ti, clone, md)) 1639 goto requeued; 1640 1641 spin_lock_irq(q->queue_lock); 1642 } 1643 1644 goto out; 1645 1646 requeued: 1647 spin_lock_irq(q->queue_lock); 1648 1649 plug_and_out: 1650 if (!elv_queue_empty(q)) 1651 /* Some requests still remain, retry later */ 1652 blk_plug_device(q); 1653 1654 out: 1655 dm_table_put(map); 1656 1657 return; 1658 } 1659 1660 int dm_underlying_device_busy(struct request_queue *q) 1661 { 1662 return blk_lld_busy(q); 1663 } 1664 EXPORT_SYMBOL_GPL(dm_underlying_device_busy); 1665 1666 static int dm_lld_busy(struct request_queue *q) 1667 { 1668 int r; 1669 struct mapped_device *md = q->queuedata; 1670 struct dm_table *map = dm_get_live_table(md); 1671 1672 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1673 r = 1; 1674 else 1675 r = dm_table_any_busy_target(map); 1676 1677 dm_table_put(map); 1678 1679 return r; 1680 } 1681 1682 static void dm_unplug_all(struct request_queue *q) 1683 { 1684 struct mapped_device *md = q->queuedata; 1685 struct dm_table *map = dm_get_live_table(md); 1686 1687 if (map) { 1688 if (dm_request_based(md)) 1689 generic_unplug_device(q); 1690 1691 dm_table_unplug_all(map); 1692 dm_table_put(map); 1693 } 1694 } 1695 1696 static int dm_any_congested(void *congested_data, int bdi_bits) 1697 { 1698 int r = bdi_bits; 1699 struct mapped_device *md = congested_data; 1700 struct dm_table *map; 1701 1702 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1703 map = dm_get_live_table(md); 1704 if (map) { 1705 /* 1706 * Request-based dm cares about only own queue for 1707 * the query about congestion status of request_queue 1708 */ 1709 if (dm_request_based(md)) 1710 r = md->queue->backing_dev_info.state & 1711 bdi_bits; 1712 else 1713 r = dm_table_any_congested(map, bdi_bits); 1714 1715 dm_table_put(map); 1716 } 1717 } 1718 1719 return r; 1720 } 1721 1722 /*----------------------------------------------------------------- 1723 * An IDR is used to keep track of allocated minor numbers. 1724 *---------------------------------------------------------------*/ 1725 static DEFINE_IDR(_minor_idr); 1726 1727 static void free_minor(int minor) 1728 { 1729 spin_lock(&_minor_lock); 1730 idr_remove(&_minor_idr, minor); 1731 spin_unlock(&_minor_lock); 1732 } 1733 1734 /* 1735 * See if the device with a specific minor # is free. 
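 * Minor allocation is done in two steps: idr_pre_get() preallocates memory,
 * then the slot is claimed with the MINOR_ALLOCED placeholder under
 * _minor_lock; alloc_dev() later swaps in the real md with idr_replace().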
1736 */ 1737 static int specific_minor(int minor) 1738 { 1739 int r, m; 1740 1741 if (minor >= (1 << MINORBITS)) 1742 return -EINVAL; 1743 1744 r = idr_pre_get(&_minor_idr, GFP_KERNEL); 1745 if (!r) 1746 return -ENOMEM; 1747 1748 spin_lock(&_minor_lock); 1749 1750 if (idr_find(&_minor_idr, minor)) { 1751 r = -EBUSY; 1752 goto out; 1753 } 1754 1755 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m); 1756 if (r) 1757 goto out; 1758 1759 if (m != minor) { 1760 idr_remove(&_minor_idr, m); 1761 r = -EBUSY; 1762 goto out; 1763 } 1764 1765 out: 1766 spin_unlock(&_minor_lock); 1767 return r; 1768 } 1769 1770 static int next_free_minor(int *minor) 1771 { 1772 int r, m; 1773 1774 r = idr_pre_get(&_minor_idr, GFP_KERNEL); 1775 if (!r) 1776 return -ENOMEM; 1777 1778 spin_lock(&_minor_lock); 1779 1780 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m); 1781 if (r) 1782 goto out; 1783 1784 if (m >= (1 << MINORBITS)) { 1785 idr_remove(&_minor_idr, m); 1786 r = -ENOSPC; 1787 goto out; 1788 } 1789 1790 *minor = m; 1791 1792 out: 1793 spin_unlock(&_minor_lock); 1794 return r; 1795 } 1796 1797 static const struct block_device_operations dm_blk_dops; 1798 1799 static void dm_wq_work(struct work_struct *work); 1800 1801 static void dm_init_md_queue(struct mapped_device *md) 1802 { 1803 /* 1804 * Request-based dm devices cannot be stacked on top of bio-based dm 1805 * devices. The type of this dm device has not been decided yet. 1806 * The type is decided at the first table loading time. 1807 * To prevent problematic device stacking, clear the queue flag 1808 * for request stacking support until then. 1809 * 1810 * This queue is new, so no concurrency on the queue_flags. 1811 */ 1812 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1813 1814 md->queue->queuedata = md; 1815 md->queue->backing_dev_info.congested_fn = dm_any_congested; 1816 md->queue->backing_dev_info.congested_data = md; 1817 blk_queue_make_request(md->queue, dm_request); 1818 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1819 md->queue->unplug_fn = dm_unplug_all; 1820 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1821 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); 1822 } 1823 1824 /* 1825 * Allocate and initialise a blank device with a given minor. 
1826 */ 1827 static struct mapped_device *alloc_dev(int minor) 1828 { 1829 int r; 1830 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 1831 void *old_md; 1832 1833 if (!md) { 1834 DMWARN("unable to allocate device, out of memory."); 1835 return NULL; 1836 } 1837 1838 if (!try_module_get(THIS_MODULE)) 1839 goto bad_module_get; 1840 1841 /* get a minor number for the dev */ 1842 if (minor == DM_ANY_MINOR) 1843 r = next_free_minor(&minor); 1844 else 1845 r = specific_minor(minor); 1846 if (r < 0) 1847 goto bad_minor; 1848 1849 md->type = DM_TYPE_NONE; 1850 init_rwsem(&md->io_lock); 1851 mutex_init(&md->suspend_lock); 1852 mutex_init(&md->type_lock); 1853 spin_lock_init(&md->deferred_lock); 1854 rwlock_init(&md->map_lock); 1855 atomic_set(&md->holders, 1); 1856 atomic_set(&md->open_count, 0); 1857 atomic_set(&md->event_nr, 0); 1858 atomic_set(&md->uevent_seq, 0); 1859 INIT_LIST_HEAD(&md->uevent_list); 1860 spin_lock_init(&md->uevent_lock); 1861 1862 md->queue = blk_alloc_queue(GFP_KERNEL); 1863 if (!md->queue) 1864 goto bad_queue; 1865 1866 dm_init_md_queue(md); 1867 1868 md->disk = alloc_disk(1); 1869 if (!md->disk) 1870 goto bad_disk; 1871 1872 atomic_set(&md->pending[0], 0); 1873 atomic_set(&md->pending[1], 0); 1874 init_waitqueue_head(&md->wait); 1875 INIT_WORK(&md->work, dm_wq_work); 1876 init_waitqueue_head(&md->eventq); 1877 1878 md->disk->major = _major; 1879 md->disk->first_minor = minor; 1880 md->disk->fops = &dm_blk_dops; 1881 md->disk->queue = md->queue; 1882 md->disk->private_data = md; 1883 sprintf(md->disk->disk_name, "dm-%d", minor); 1884 add_disk(md->disk); 1885 format_dev_t(md->name, MKDEV(_major, minor)); 1886 1887 md->wq = create_singlethread_workqueue("kdmflush"); 1888 if (!md->wq) 1889 goto bad_thread; 1890 1891 md->bdev = bdget_disk(md->disk, 0); 1892 if (!md->bdev) 1893 goto bad_bdev; 1894 1895 bio_init(&md->flush_bio); 1896 md->flush_bio.bi_bdev = md->bdev; 1897 md->flush_bio.bi_rw = WRITE_FLUSH; 1898 1899 /* Populate the mapping, nobody knows we exist yet */ 1900 spin_lock(&_minor_lock); 1901 old_md = idr_replace(&_minor_idr, md, minor); 1902 spin_unlock(&_minor_lock); 1903 1904 BUG_ON(old_md != MINOR_ALLOCED); 1905 1906 return md; 1907 1908 bad_bdev: 1909 destroy_workqueue(md->wq); 1910 bad_thread: 1911 del_gendisk(md->disk); 1912 put_disk(md->disk); 1913 bad_disk: 1914 blk_cleanup_queue(md->queue); 1915 bad_queue: 1916 free_minor(minor); 1917 bad_minor: 1918 module_put(THIS_MODULE); 1919 bad_module_get: 1920 kfree(md); 1921 return NULL; 1922 } 1923 1924 static void unlock_fs(struct mapped_device *md); 1925 1926 static void free_dev(struct mapped_device *md) 1927 { 1928 int minor = MINOR(disk_devt(md->disk)); 1929 1930 unlock_fs(md); 1931 bdput(md->bdev); 1932 destroy_workqueue(md->wq); 1933 if (md->tio_pool) 1934 mempool_destroy(md->tio_pool); 1935 if (md->io_pool) 1936 mempool_destroy(md->io_pool); 1937 if (md->bs) 1938 bioset_free(md->bs); 1939 blk_integrity_unregister(md->disk); 1940 del_gendisk(md->disk); 1941 free_minor(minor); 1942 1943 spin_lock(&_minor_lock); 1944 md->disk->private_data = NULL; 1945 spin_unlock(&_minor_lock); 1946 1947 put_disk(md->disk); 1948 blk_cleanup_queue(md->queue); 1949 module_put(THIS_MODULE); 1950 kfree(md); 1951 } 1952 1953 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1954 { 1955 struct dm_md_mempools *p; 1956 1957 if (md->io_pool && md->tio_pool && md->bs) 1958 /* the md already has necessary mempools */ 1959 goto out; 1960 1961 p = dm_table_get_md_mempools(t); 1962 BUG_ON(!p || 
md->io_pool || md->tio_pool || md->bs); 1963 1964 md->io_pool = p->io_pool; 1965 p->io_pool = NULL; 1966 md->tio_pool = p->tio_pool; 1967 p->tio_pool = NULL; 1968 md->bs = p->bs; 1969 p->bs = NULL; 1970 1971 out: 1972 /* mempool bind completed, now no need any mempools in the table */ 1973 dm_table_free_md_mempools(t); 1974 } 1975 1976 /* 1977 * Bind a table to the device. 1978 */ 1979 static void event_callback(void *context) 1980 { 1981 unsigned long flags; 1982 LIST_HEAD(uevents); 1983 struct mapped_device *md = (struct mapped_device *) context; 1984 1985 spin_lock_irqsave(&md->uevent_lock, flags); 1986 list_splice_init(&md->uevent_list, &uevents); 1987 spin_unlock_irqrestore(&md->uevent_lock, flags); 1988 1989 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 1990 1991 atomic_inc(&md->event_nr); 1992 wake_up(&md->eventq); 1993 } 1994 1995 static void __set_size(struct mapped_device *md, sector_t size) 1996 { 1997 set_capacity(md->disk, size); 1998 1999 mutex_lock(&md->bdev->bd_inode->i_mutex); 2000 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2001 mutex_unlock(&md->bdev->bd_inode->i_mutex); 2002 } 2003 2004 /* 2005 * Returns old map, which caller must destroy. 2006 */ 2007 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2008 struct queue_limits *limits) 2009 { 2010 struct dm_table *old_map; 2011 struct request_queue *q = md->queue; 2012 sector_t size; 2013 unsigned long flags; 2014 2015 size = dm_table_get_size(t); 2016 2017 /* 2018 * Wipe any geometry if the size of the table changed. 2019 */ 2020 if (size != get_capacity(md->disk)) 2021 memset(&md->geometry, 0, sizeof(md->geometry)); 2022 2023 __set_size(md, size); 2024 2025 dm_table_event_callback(t, event_callback, md); 2026 2027 /* 2028 * The queue hasn't been stopped yet, if the old table type wasn't 2029 * for request-based during suspension. So stop it to prevent 2030 * I/O mapping before resume. 2031 * This must be done before setting the queue restrictions, 2032 * because request-based dm may be run just after the setting. 2033 */ 2034 if (dm_table_request_based(t) && !blk_queue_stopped(q)) 2035 stop_queue(q); 2036 2037 __bind_mempools(md, t); 2038 2039 write_lock_irqsave(&md->map_lock, flags); 2040 old_map = md->map; 2041 md->map = t; 2042 dm_table_set_restrictions(t, q, limits); 2043 write_unlock_irqrestore(&md->map_lock, flags); 2044 2045 return old_map; 2046 } 2047 2048 /* 2049 * Returns unbound table for the caller to free. 2050 */ 2051 static struct dm_table *__unbind(struct mapped_device *md) 2052 { 2053 struct dm_table *map = md->map; 2054 unsigned long flags; 2055 2056 if (!map) 2057 return NULL; 2058 2059 dm_table_event_callback(map, NULL, NULL); 2060 write_lock_irqsave(&md->map_lock, flags); 2061 md->map = NULL; 2062 write_unlock_irqrestore(&md->map_lock, flags); 2063 2064 return map; 2065 } 2066 2067 /* 2068 * Constructor for a new device. 2069 */ 2070 int dm_create(int minor, struct mapped_device **result) 2071 { 2072 struct mapped_device *md; 2073 2074 md = alloc_dev(minor); 2075 if (!md) 2076 return -ENXIO; 2077 2078 dm_sysfs_init(md); 2079 2080 *result = md; 2081 return 0; 2082 } 2083 2084 /* 2085 * Functions to manage md->type. 2086 * All are required to hold md->type_lock. 
2087 */ 2088 void dm_lock_md_type(struct mapped_device *md) 2089 { 2090 mutex_lock(&md->type_lock); 2091 } 2092 2093 void dm_unlock_md_type(struct mapped_device *md) 2094 { 2095 mutex_unlock(&md->type_lock); 2096 } 2097 2098 void dm_set_md_type(struct mapped_device *md, unsigned type) 2099 { 2100 md->type = type; 2101 } 2102 2103 unsigned dm_get_md_type(struct mapped_device *md) 2104 { 2105 return md->type; 2106 } 2107 2108 /* 2109 * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 2110 */ 2111 static int dm_init_request_based_queue(struct mapped_device *md) 2112 { 2113 struct request_queue *q = NULL; 2114 2115 if (md->queue->elevator) 2116 return 1; 2117 2118 /* Fully initialize the queue */ 2119 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 2120 if (!q) 2121 return 0; 2122 2123 md->queue = q; 2124 md->saved_make_request_fn = md->queue->make_request_fn; 2125 dm_init_md_queue(md); 2126 blk_queue_softirq_done(md->queue, dm_softirq_done); 2127 blk_queue_prep_rq(md->queue, dm_prep_fn); 2128 blk_queue_lld_busy(md->queue, dm_lld_busy); 2129 2130 elv_register_queue(md->queue); 2131 2132 return 1; 2133 } 2134 2135 /* 2136 * Setup the DM device's queue based on md's type 2137 */ 2138 int dm_setup_md_queue(struct mapped_device *md) 2139 { 2140 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && 2141 !dm_init_request_based_queue(md)) { 2142 DMWARN("Cannot initialize queue for request-based mapped device"); 2143 return -EINVAL; 2144 } 2145 2146 return 0; 2147 } 2148 2149 static struct mapped_device *dm_find_md(dev_t dev) 2150 { 2151 struct mapped_device *md; 2152 unsigned minor = MINOR(dev); 2153 2154 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2155 return NULL; 2156 2157 spin_lock(&_minor_lock); 2158 2159 md = idr_find(&_minor_idr, minor); 2160 if (md && (md == MINOR_ALLOCED || 2161 (MINOR(disk_devt(dm_disk(md))) != minor) || 2162 dm_deleting_md(md) || 2163 test_bit(DMF_FREEING, &md->flags))) { 2164 md = NULL; 2165 goto out; 2166 } 2167 2168 out: 2169 spin_unlock(&_minor_lock); 2170 2171 return md; 2172 } 2173 2174 struct mapped_device *dm_get_md(dev_t dev) 2175 { 2176 struct mapped_device *md = dm_find_md(dev); 2177 2178 if (md) 2179 dm_get(md); 2180 2181 return md; 2182 } 2183 2184 void *dm_get_mdptr(struct mapped_device *md) 2185 { 2186 return md->interface_ptr; 2187 } 2188 2189 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2190 { 2191 md->interface_ptr = ptr; 2192 } 2193 2194 void dm_get(struct mapped_device *md) 2195 { 2196 atomic_inc(&md->holders); 2197 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2198 } 2199 2200 const char *dm_device_name(struct mapped_device *md) 2201 { 2202 return md->name; 2203 } 2204 EXPORT_SYMBOL_GPL(dm_device_name); 2205 2206 static void __dm_destroy(struct mapped_device *md, bool wait) 2207 { 2208 struct dm_table *map; 2209 2210 might_sleep(); 2211 2212 spin_lock(&_minor_lock); 2213 map = dm_get_live_table(md); 2214 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2215 set_bit(DMF_FREEING, &md->flags); 2216 spin_unlock(&_minor_lock); 2217 2218 if (!dm_suspended_md(md)) { 2219 dm_table_presuspend_targets(map); 2220 dm_table_postsuspend_targets(map); 2221 } 2222 2223 /* 2224 * Rare, but there may be I/O requests still going to complete, 2225 * for example. Wait for all references to disappear. 2226 * No one should increment the reference count of the mapped_device, 2227 * after the mapped_device state becomes DMF_FREEING. 
2228 */ 2229 if (wait) 2230 while (atomic_read(&md->holders)) 2231 msleep(1); 2232 else if (atomic_read(&md->holders)) 2233 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2234 dm_device_name(md), atomic_read(&md->holders)); 2235 2236 dm_sysfs_exit(md); 2237 dm_table_put(map); 2238 dm_table_destroy(__unbind(md)); 2239 free_dev(md); 2240 } 2241 2242 void dm_destroy(struct mapped_device *md) 2243 { 2244 __dm_destroy(md, true); 2245 } 2246 2247 void dm_destroy_immediate(struct mapped_device *md) 2248 { 2249 __dm_destroy(md, false); 2250 } 2251 2252 void dm_put(struct mapped_device *md) 2253 { 2254 atomic_dec(&md->holders); 2255 } 2256 EXPORT_SYMBOL_GPL(dm_put); 2257 2258 static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 2259 { 2260 int r = 0; 2261 DECLARE_WAITQUEUE(wait, current); 2262 2263 dm_unplug_all(md->queue); 2264 2265 add_wait_queue(&md->wait, &wait); 2266 2267 while (1) { 2268 set_current_state(interruptible); 2269 2270 smp_mb(); 2271 if (!md_in_flight(md)) 2272 break; 2273 2274 if (interruptible == TASK_INTERRUPTIBLE && 2275 signal_pending(current)) { 2276 r = -EINTR; 2277 break; 2278 } 2279 2280 io_schedule(); 2281 } 2282 set_current_state(TASK_RUNNING); 2283 2284 remove_wait_queue(&md->wait, &wait); 2285 2286 return r; 2287 } 2288 2289 /* 2290 * Process the deferred bios 2291 */ 2292 static void dm_wq_work(struct work_struct *work) 2293 { 2294 struct mapped_device *md = container_of(work, struct mapped_device, 2295 work); 2296 struct bio *c; 2297 2298 down_read(&md->io_lock); 2299 2300 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2301 spin_lock_irq(&md->deferred_lock); 2302 c = bio_list_pop(&md->deferred); 2303 spin_unlock_irq(&md->deferred_lock); 2304 2305 if (!c) 2306 break; 2307 2308 up_read(&md->io_lock); 2309 2310 if (dm_request_based(md)) 2311 generic_make_request(c); 2312 else 2313 __split_and_process_bio(md, c); 2314 2315 down_read(&md->io_lock); 2316 } 2317 2318 up_read(&md->io_lock); 2319 } 2320 2321 static void dm_queue_flush(struct mapped_device *md) 2322 { 2323 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2324 smp_mb__after_clear_bit(); 2325 queue_work(md->wq, &md->work); 2326 } 2327 2328 /* 2329 * Swap in a new table, returning the old one for the caller to destroy. 2330 */ 2331 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2332 { 2333 struct dm_table *map = ERR_PTR(-EINVAL); 2334 struct queue_limits limits; 2335 int r; 2336 2337 mutex_lock(&md->suspend_lock); 2338 2339 /* device must be suspended */ 2340 if (!dm_suspended_md(md)) 2341 goto out; 2342 2343 r = dm_calculate_queue_limits(table, &limits); 2344 if (r) { 2345 map = ERR_PTR(r); 2346 goto out; 2347 } 2348 2349 map = __bind(md, table, &limits); 2350 2351 out: 2352 mutex_unlock(&md->suspend_lock); 2353 return map; 2354 } 2355 2356 /* 2357 * Functions to lock and unlock any filesystem running on the 2358 * device. 
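 * lock_fs() freezes the underlying filesystem with freeze_bdev() and sets
 * DMF_FROZEN; unlock_fs() thaws it again only if that flag is set.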
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_live_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers, i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and to quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);
	r = 0;
out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

	if (!pools)
		return NULL;

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->tio_pool)
		goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
	if (!pools->bs)
		goto free_tio_pool_and_out;

	return pools;

free_tio_pool_and_out:
	mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

free_pools_and_out:
	kfree(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");