1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm.h" 9 #include "dm-uevent.h" 10 11 #include <linux/init.h> 12 #include <linux/module.h> 13 #include <linux/mutex.h> 14 #include <linux/moduleparam.h> 15 #include <linux/blkpg.h> 16 #include <linux/bio.h> 17 #include <linux/mempool.h> 18 #include <linux/slab.h> 19 #include <linux/idr.h> 20 #include <linux/hdreg.h> 21 #include <linux/delay.h> 22 #include <linux/wait.h> 23 #include <linux/kthread.h> 24 25 #include <trace/events/block.h> 26 27 #define DM_MSG_PREFIX "core" 28 29 #ifdef CONFIG_PRINTK 30 /* 31 * ratelimit state to be used in DMXXX_LIMIT(). 32 */ 33 DEFINE_RATELIMIT_STATE(dm_ratelimit_state, 34 DEFAULT_RATELIMIT_INTERVAL, 35 DEFAULT_RATELIMIT_BURST); 36 EXPORT_SYMBOL(dm_ratelimit_state); 37 #endif 38 39 /* 40 * Cookies are numeric values sent with CHANGE and REMOVE 41 * uevents while resuming, removing or renaming the device. 42 */ 43 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 44 #define DM_COOKIE_LENGTH 24 45 46 static const char *_name = DM_NAME; 47 48 static unsigned int major = 0; 49 static unsigned int _major = 0; 50 51 static DEFINE_IDR(_minor_idr); 52 53 static DEFINE_SPINLOCK(_minor_lock); 54 55 static void do_deferred_remove(struct work_struct *w); 56 57 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 58 59 static struct workqueue_struct *deferred_remove_workqueue; 60 61 /* 62 * For bio-based dm. 63 * One of these is allocated per bio. 64 */ 65 struct dm_io { 66 struct mapped_device *md; 67 int error; 68 atomic_t io_count; 69 struct bio *bio; 70 unsigned long start_time; 71 spinlock_t endio_lock; 72 struct dm_stats_aux stats_aux; 73 }; 74 75 /* 76 * For request-based dm. 77 * One of these is allocated per request. 78 */ 79 struct dm_rq_target_io { 80 struct mapped_device *md; 81 struct dm_target *ti; 82 struct request *orig, *clone; 83 struct kthread_work work; 84 int error; 85 union map_info info; 86 }; 87 88 /* 89 * For request-based dm - the bio clones we allocate are embedded in these 90 * structs. 91 * 92 * We allocate these with bio_alloc_bioset, using the front_pad parameter when 93 * the bioset is created - this means the bio has to come at the end of the 94 * struct. 95 */ 96 struct dm_rq_clone_bio_info { 97 struct bio *orig; 98 struct dm_rq_target_io *tio; 99 struct bio clone; 100 }; 101 102 union map_info *dm_get_rq_mapinfo(struct request *rq) 103 { 104 if (rq && rq->end_io_data) 105 return &((struct dm_rq_target_io *)rq->end_io_data)->info; 106 return NULL; 107 } 108 EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); 109 110 #define MINOR_ALLOCED ((void *)-1) 111 112 /* 113 * Bits for the md->flags field. 114 */ 115 #define DMF_BLOCK_IO_FOR_SUSPEND 0 116 #define DMF_SUSPENDED 1 117 #define DMF_FROZEN 2 118 #define DMF_FREEING 3 119 #define DMF_DELETING 4 120 #define DMF_NOFLUSH_SUSPENDING 5 121 #define DMF_MERGE_IS_OPTIONAL 6 122 #define DMF_DEFERRED_REMOVE 7 123 #define DMF_SUSPENDED_INTERNALLY 8 124 125 /* 126 * A dummy definition to make RCU happy. 127 * struct dm_table should never be dereferenced in this file. 128 */ 129 struct dm_table { 130 int undefined__; 131 }; 132 133 /* 134 * Work processed by per-device workqueue. 135 */ 136 struct mapped_device { 137 struct srcu_struct io_barrier; 138 struct mutex suspend_lock; 139 atomic_t holders; 140 atomic_t open_count; 141 142 /* 143 * The current mapping. 
144 * Use dm_get_live_table{_fast} or take suspend_lock for 145 * dereference. 146 */ 147 struct dm_table __rcu *map; 148 149 struct list_head table_devices; 150 struct mutex table_devices_lock; 151 152 unsigned long flags; 153 154 struct request_queue *queue; 155 unsigned type; 156 /* Protect queue and type against concurrent access. */ 157 struct mutex type_lock; 158 159 struct target_type *immutable_target_type; 160 161 struct gendisk *disk; 162 char name[16]; 163 164 void *interface_ptr; 165 166 /* 167 * A list of ios that arrived while we were suspended. 168 */ 169 atomic_t pending[2]; 170 wait_queue_head_t wait; 171 struct work_struct work; 172 struct bio_list deferred; 173 spinlock_t deferred_lock; 174 175 /* 176 * Processing queue (flush) 177 */ 178 struct workqueue_struct *wq; 179 180 /* 181 * io objects are allocated from here. 182 */ 183 mempool_t *io_pool; 184 mempool_t *rq_pool; 185 186 struct bio_set *bs; 187 188 /* 189 * Event handling. 190 */ 191 atomic_t event_nr; 192 wait_queue_head_t eventq; 193 atomic_t uevent_seq; 194 struct list_head uevent_list; 195 spinlock_t uevent_lock; /* Protect access to uevent_list */ 196 197 /* 198 * freeze/thaw support require holding onto a super block 199 */ 200 struct super_block *frozen_sb; 201 struct block_device *bdev; 202 203 /* forced geometry settings */ 204 struct hd_geometry geometry; 205 206 /* kobject and completion */ 207 struct dm_kobject_holder kobj_holder; 208 209 /* zero-length flush that will be cloned and submitted to targets */ 210 struct bio flush_bio; 211 212 struct dm_stats stats; 213 214 struct kthread_worker kworker; 215 struct task_struct *kworker_task; 216 }; 217 218 /* 219 * For mempools pre-allocation at the table loading time. 220 */ 221 struct dm_md_mempools { 222 mempool_t *io_pool; 223 mempool_t *rq_pool; 224 struct bio_set *bs; 225 }; 226 227 struct table_device { 228 struct list_head list; 229 atomic_t count; 230 struct dm_dev dm_dev; 231 }; 232 233 #define RESERVED_BIO_BASED_IOS 16 234 #define RESERVED_REQUEST_BASED_IOS 256 235 #define RESERVED_MAX_IOS 1024 236 static struct kmem_cache *_io_cache; 237 static struct kmem_cache *_rq_tio_cache; 238 static struct kmem_cache *_rq_cache; 239 240 /* 241 * Bio-based DM's mempools' reserved IOs set by the user. 242 */ 243 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 244 245 /* 246 * Request-based DM's mempools' reserved IOs set by the user. 
247 */ 248 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; 249 250 static unsigned __dm_get_reserved_ios(unsigned *reserved_ios, 251 unsigned def, unsigned max) 252 { 253 unsigned ios = ACCESS_ONCE(*reserved_ios); 254 unsigned modified_ios = 0; 255 256 if (!ios) 257 modified_ios = def; 258 else if (ios > max) 259 modified_ios = max; 260 261 if (modified_ios) { 262 (void)cmpxchg(reserved_ios, ios, modified_ios); 263 ios = modified_ios; 264 } 265 266 return ios; 267 } 268 269 unsigned dm_get_reserved_bio_based_ios(void) 270 { 271 return __dm_get_reserved_ios(&reserved_bio_based_ios, 272 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); 273 } 274 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 275 276 unsigned dm_get_reserved_rq_based_ios(void) 277 { 278 return __dm_get_reserved_ios(&reserved_rq_based_ios, 279 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); 280 } 281 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); 282 283 static int __init local_init(void) 284 { 285 int r = -ENOMEM; 286 287 /* allocate a slab for the dm_ios */ 288 _io_cache = KMEM_CACHE(dm_io, 0); 289 if (!_io_cache) 290 return r; 291 292 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 293 if (!_rq_tio_cache) 294 goto out_free_io_cache; 295 296 _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request), 297 __alignof__(struct request), 0, NULL); 298 if (!_rq_cache) 299 goto out_free_rq_tio_cache; 300 301 r = dm_uevent_init(); 302 if (r) 303 goto out_free_rq_cache; 304 305 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 306 if (!deferred_remove_workqueue) { 307 r = -ENOMEM; 308 goto out_uevent_exit; 309 } 310 311 _major = major; 312 r = register_blkdev(_major, _name); 313 if (r < 0) 314 goto out_free_workqueue; 315 316 if (!_major) 317 _major = r; 318 319 return 0; 320 321 out_free_workqueue: 322 destroy_workqueue(deferred_remove_workqueue); 323 out_uevent_exit: 324 dm_uevent_exit(); 325 out_free_rq_cache: 326 kmem_cache_destroy(_rq_cache); 327 out_free_rq_tio_cache: 328 kmem_cache_destroy(_rq_tio_cache); 329 out_free_io_cache: 330 kmem_cache_destroy(_io_cache); 331 332 return r; 333 } 334 335 static void local_exit(void) 336 { 337 flush_scheduled_work(); 338 destroy_workqueue(deferred_remove_workqueue); 339 340 kmem_cache_destroy(_rq_cache); 341 kmem_cache_destroy(_rq_tio_cache); 342 kmem_cache_destroy(_io_cache); 343 unregister_blkdev(_major, _name); 344 dm_uevent_exit(); 345 346 _major = 0; 347 348 DMINFO("cleaned up"); 349 } 350 351 static int (*_inits[])(void) __initdata = { 352 local_init, 353 dm_target_init, 354 dm_linear_init, 355 dm_stripe_init, 356 dm_io_init, 357 dm_kcopyd_init, 358 dm_interface_init, 359 dm_statistics_init, 360 }; 361 362 static void (*_exits[])(void) = { 363 local_exit, 364 dm_target_exit, 365 dm_linear_exit, 366 dm_stripe_exit, 367 dm_io_exit, 368 dm_kcopyd_exit, 369 dm_interface_exit, 370 dm_statistics_exit, 371 }; 372 373 static int __init dm_init(void) 374 { 375 const int count = ARRAY_SIZE(_inits); 376 377 int r, i; 378 379 for (i = 0; i < count; i++) { 380 r = _inits[i](); 381 if (r) 382 goto bad; 383 } 384 385 return 0; 386 387 bad: 388 while (i--) 389 _exits[i](); 390 391 return r; 392 } 393 394 static void __exit dm_exit(void) 395 { 396 int i = ARRAY_SIZE(_exits); 397 398 while (i--) 399 _exits[i](); 400 401 /* 402 * Should be empty by this point. 
403 */ 404 idr_destroy(&_minor_idr); 405 } 406 407 /* 408 * Block device functions 409 */ 410 int dm_deleting_md(struct mapped_device *md) 411 { 412 return test_bit(DMF_DELETING, &md->flags); 413 } 414 415 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 416 { 417 struct mapped_device *md; 418 419 spin_lock(&_minor_lock); 420 421 md = bdev->bd_disk->private_data; 422 if (!md) 423 goto out; 424 425 if (test_bit(DMF_FREEING, &md->flags) || 426 dm_deleting_md(md)) { 427 md = NULL; 428 goto out; 429 } 430 431 dm_get(md); 432 atomic_inc(&md->open_count); 433 434 out: 435 spin_unlock(&_minor_lock); 436 437 return md ? 0 : -ENXIO; 438 } 439 440 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 441 { 442 struct mapped_device *md = disk->private_data; 443 444 spin_lock(&_minor_lock); 445 446 if (atomic_dec_and_test(&md->open_count) && 447 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 448 queue_work(deferred_remove_workqueue, &deferred_remove_work); 449 450 dm_put(md); 451 452 spin_unlock(&_minor_lock); 453 } 454 455 int dm_open_count(struct mapped_device *md) 456 { 457 return atomic_read(&md->open_count); 458 } 459 460 /* 461 * Guarantees nothing is using the device before it's deleted. 462 */ 463 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 464 { 465 int r = 0; 466 467 spin_lock(&_minor_lock); 468 469 if (dm_open_count(md)) { 470 r = -EBUSY; 471 if (mark_deferred) 472 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 473 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 474 r = -EEXIST; 475 else 476 set_bit(DMF_DELETING, &md->flags); 477 478 spin_unlock(&_minor_lock); 479 480 return r; 481 } 482 483 int dm_cancel_deferred_remove(struct mapped_device *md) 484 { 485 int r = 0; 486 487 spin_lock(&_minor_lock); 488 489 if (test_bit(DMF_DELETING, &md->flags)) 490 r = -EBUSY; 491 else 492 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 493 494 spin_unlock(&_minor_lock); 495 496 return r; 497 } 498 499 static void do_deferred_remove(struct work_struct *w) 500 { 501 dm_deferred_remove(); 502 } 503 504 sector_t dm_get_size(struct mapped_device *md) 505 { 506 return get_capacity(md->disk); 507 } 508 509 struct request_queue *dm_get_md_queue(struct mapped_device *md) 510 { 511 return md->queue; 512 } 513 514 struct dm_stats *dm_get_stats(struct mapped_device *md) 515 { 516 return &md->stats; 517 } 518 519 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 520 { 521 struct mapped_device *md = bdev->bd_disk->private_data; 522 523 return dm_get_geometry(md, geo); 524 } 525 526 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 527 unsigned int cmd, unsigned long arg) 528 { 529 struct mapped_device *md = bdev->bd_disk->private_data; 530 int srcu_idx; 531 struct dm_table *map; 532 struct dm_target *tgt; 533 int r = -ENOTTY; 534 535 retry: 536 map = dm_get_live_table(md, &srcu_idx); 537 538 if (!map || !dm_table_get_size(map)) 539 goto out; 540 541 /* We only support devices that have a single target */ 542 if (dm_table_get_num_targets(map) != 1) 543 goto out; 544 545 tgt = dm_table_get_target(map, 0); 546 if (!tgt->type->ioctl) 547 goto out; 548 549 if (dm_suspended_md(md)) { 550 r = -EAGAIN; 551 goto out; 552 } 553 554 r = tgt->type->ioctl(tgt, cmd, arg); 555 556 out: 557 dm_put_live_table(md, srcu_idx); 558 559 if (r == -ENOTCONN) { 560 msleep(10); 561 goto retry; 562 } 563 564 return r; 565 } 566 567 static struct dm_io *alloc_io(struct mapped_device *md) 568 { 569 return 
mempool_alloc(md->io_pool, GFP_NOIO); 570 } 571 572 static void free_io(struct mapped_device *md, struct dm_io *io) 573 { 574 mempool_free(io, md->io_pool); 575 } 576 577 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) 578 { 579 bio_put(&tio->clone); 580 } 581 582 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, 583 gfp_t gfp_mask) 584 { 585 return mempool_alloc(md->io_pool, gfp_mask); 586 } 587 588 static void free_rq_tio(struct dm_rq_target_io *tio) 589 { 590 mempool_free(tio, tio->md->io_pool); 591 } 592 593 static struct request *alloc_clone_request(struct mapped_device *md, 594 gfp_t gfp_mask) 595 { 596 return mempool_alloc(md->rq_pool, gfp_mask); 597 } 598 599 static void free_clone_request(struct mapped_device *md, struct request *rq) 600 { 601 mempool_free(rq, md->rq_pool); 602 } 603 604 static int md_in_flight(struct mapped_device *md) 605 { 606 return atomic_read(&md->pending[READ]) + 607 atomic_read(&md->pending[WRITE]); 608 } 609 610 static void start_io_acct(struct dm_io *io) 611 { 612 struct mapped_device *md = io->md; 613 struct bio *bio = io->bio; 614 int cpu; 615 int rw = bio_data_dir(bio); 616 617 io->start_time = jiffies; 618 619 cpu = part_stat_lock(); 620 part_round_stats(cpu, &dm_disk(md)->part0); 621 part_stat_unlock(); 622 atomic_set(&dm_disk(md)->part0.in_flight[rw], 623 atomic_inc_return(&md->pending[rw])); 624 625 if (unlikely(dm_stats_used(&md->stats))) 626 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 627 bio_sectors(bio), false, 0, &io->stats_aux); 628 } 629 630 static void end_io_acct(struct dm_io *io) 631 { 632 struct mapped_device *md = io->md; 633 struct bio *bio = io->bio; 634 unsigned long duration = jiffies - io->start_time; 635 int pending; 636 int rw = bio_data_dir(bio); 637 638 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); 639 640 if (unlikely(dm_stats_used(&md->stats))) 641 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 642 bio_sectors(bio), true, duration, &io->stats_aux); 643 644 /* 645 * After this is decremented the bio must not be touched if it is 646 * a flush. 647 */ 648 pending = atomic_dec_return(&md->pending[rw]); 649 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); 650 pending += atomic_read(&md->pending[rw^0x1]); 651 652 /* nudge anyone waiting on suspend queue */ 653 if (!pending) 654 wake_up(&md->wait); 655 } 656 657 /* 658 * Add the bio to the list of deferred io. 659 */ 660 static void queue_io(struct mapped_device *md, struct bio *bio) 661 { 662 unsigned long flags; 663 664 spin_lock_irqsave(&md->deferred_lock, flags); 665 bio_list_add(&md->deferred, bio); 666 spin_unlock_irqrestore(&md->deferred_lock, flags); 667 queue_work(md->wq, &md->work); 668 } 669 670 /* 671 * Everyone (including functions in this file), should use this 672 * function to access the md->map field, and make sure they call 673 * dm_put_live_table() when finished. 
674 */ 675 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 676 { 677 *srcu_idx = srcu_read_lock(&md->io_barrier); 678 679 return srcu_dereference(md->map, &md->io_barrier); 680 } 681 682 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 683 { 684 srcu_read_unlock(&md->io_barrier, srcu_idx); 685 } 686 687 void dm_sync_table(struct mapped_device *md) 688 { 689 synchronize_srcu(&md->io_barrier); 690 synchronize_rcu_expedited(); 691 } 692 693 /* 694 * A fast alternative to dm_get_live_table/dm_put_live_table. 695 * The caller must not block between these two functions. 696 */ 697 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 698 { 699 rcu_read_lock(); 700 return rcu_dereference(md->map); 701 } 702 703 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 704 { 705 rcu_read_unlock(); 706 } 707 708 /* 709 * Open a table device so we can use it as a map destination. 710 */ 711 static int open_table_device(struct table_device *td, dev_t dev, 712 struct mapped_device *md) 713 { 714 static char *_claim_ptr = "I belong to device-mapper"; 715 struct block_device *bdev; 716 717 int r; 718 719 BUG_ON(td->dm_dev.bdev); 720 721 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); 722 if (IS_ERR(bdev)) 723 return PTR_ERR(bdev); 724 725 r = bd_link_disk_holder(bdev, dm_disk(md)); 726 if (r) { 727 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 728 return r; 729 } 730 731 td->dm_dev.bdev = bdev; 732 return 0; 733 } 734 735 /* 736 * Close a table device that we've been using. 737 */ 738 static void close_table_device(struct table_device *td, struct mapped_device *md) 739 { 740 if (!td->dm_dev.bdev) 741 return; 742 743 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 744 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 745 td->dm_dev.bdev = NULL; 746 } 747 748 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 749 fmode_t mode) { 750 struct table_device *td; 751 752 list_for_each_entry(td, l, list) 753 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 754 return td; 755 756 return NULL; 757 } 758 759 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 760 struct dm_dev **result) { 761 int r; 762 struct table_device *td; 763 764 mutex_lock(&md->table_devices_lock); 765 td = find_table_device(&md->table_devices, dev, mode); 766 if (!td) { 767 td = kmalloc(sizeof(*td), GFP_KERNEL); 768 if (!td) { 769 mutex_unlock(&md->table_devices_lock); 770 return -ENOMEM; 771 } 772 773 td->dm_dev.mode = mode; 774 td->dm_dev.bdev = NULL; 775 776 if ((r = open_table_device(td, dev, md))) { 777 mutex_unlock(&md->table_devices_lock); 778 kfree(td); 779 return r; 780 } 781 782 format_dev_t(td->dm_dev.name, dev); 783 784 atomic_set(&td->count, 0); 785 list_add(&td->list, &md->table_devices); 786 } 787 atomic_inc(&td->count); 788 mutex_unlock(&md->table_devices_lock); 789 790 *result = &td->dm_dev; 791 return 0; 792 } 793 EXPORT_SYMBOL_GPL(dm_get_table_device); 794 795 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 796 { 797 struct table_device *td = container_of(d, struct table_device, dm_dev); 798 799 mutex_lock(&md->table_devices_lock); 800 if (atomic_dec_and_test(&td->count)) { 801 close_table_device(td, md); 802 list_del(&td->list); 803 kfree(td); 804 } 805 mutex_unlock(&md->table_devices_lock); 806 } 807 EXPORT_SYMBOL(dm_put_table_device); 808 809 
static void free_table_devices(struct list_head *devices) 810 { 811 struct list_head *tmp, *next; 812 813 list_for_each_safe(tmp, next, devices) { 814 struct table_device *td = list_entry(tmp, struct table_device, list); 815 816 DMWARN("dm_destroy: %s still exists with %d references", 817 td->dm_dev.name, atomic_read(&td->count)); 818 kfree(td); 819 } 820 } 821 822 /* 823 * Get the geometry associated with a dm device 824 */ 825 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 826 { 827 *geo = md->geometry; 828 829 return 0; 830 } 831 832 /* 833 * Set the geometry of a device. 834 */ 835 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 836 { 837 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 838 839 if (geo->start > sz) { 840 DMWARN("Start sector is beyond the geometry limits."); 841 return -EINVAL; 842 } 843 844 md->geometry = *geo; 845 846 return 0; 847 } 848 849 /*----------------------------------------------------------------- 850 * CRUD START: 851 * A more elegant soln is in the works that uses the queue 852 * merge fn, unfortunately there are a couple of changes to 853 * the block layer that I want to make for this. So in the 854 * interests of getting something for people to use I give 855 * you this clearly demarcated crap. 856 *---------------------------------------------------------------*/ 857 858 static int __noflush_suspending(struct mapped_device *md) 859 { 860 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 861 } 862 863 /* 864 * Decrements the number of outstanding ios that a bio has been 865 * cloned into, completing the original io if necc. 866 */ 867 static void dec_pending(struct dm_io *io, int error) 868 { 869 unsigned long flags; 870 int io_error; 871 struct bio *bio; 872 struct mapped_device *md = io->md; 873 874 /* Push-back supersedes any I/O errors */ 875 if (unlikely(error)) { 876 spin_lock_irqsave(&io->endio_lock, flags); 877 if (!(io->error > 0 && __noflush_suspending(md))) 878 io->error = error; 879 spin_unlock_irqrestore(&io->endio_lock, flags); 880 } 881 882 if (atomic_dec_and_test(&io->io_count)) { 883 if (io->error == DM_ENDIO_REQUEUE) { 884 /* 885 * Target requested pushing back the I/O. 886 */ 887 spin_lock_irqsave(&md->deferred_lock, flags); 888 if (__noflush_suspending(md)) 889 bio_list_add_head(&md->deferred, io->bio); 890 else 891 /* noflush suspend was interrupted. */ 892 io->error = -EIO; 893 spin_unlock_irqrestore(&md->deferred_lock, flags); 894 } 895 896 io_error = io->error; 897 bio = io->bio; 898 end_io_acct(io); 899 free_io(md, io); 900 901 if (io_error == DM_ENDIO_REQUEUE) 902 return; 903 904 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { 905 /* 906 * Preflush done for flush with data, reissue 907 * without REQ_FLUSH. 
908 */ 909 bio->bi_rw &= ~REQ_FLUSH; 910 queue_io(md, bio); 911 } else { 912 /* done with normal IO or empty flush */ 913 trace_block_bio_complete(md->queue, bio, io_error); 914 bio_endio(bio, io_error); 915 } 916 } 917 } 918 919 static void disable_write_same(struct mapped_device *md) 920 { 921 struct queue_limits *limits = dm_get_queue_limits(md); 922 923 /* device doesn't really support WRITE SAME, disable it */ 924 limits->max_write_same_sectors = 0; 925 } 926 927 static void clone_endio(struct bio *bio, int error) 928 { 929 int r = error; 930 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 931 struct dm_io *io = tio->io; 932 struct mapped_device *md = tio->io->md; 933 dm_endio_fn endio = tio->ti->type->end_io; 934 935 if (!bio_flagged(bio, BIO_UPTODATE) && !error) 936 error = -EIO; 937 938 if (endio) { 939 r = endio(tio->ti, bio, error); 940 if (r < 0 || r == DM_ENDIO_REQUEUE) 941 /* 942 * error and requeue request are handled 943 * in dec_pending(). 944 */ 945 error = r; 946 else if (r == DM_ENDIO_INCOMPLETE) 947 /* The target will handle the io */ 948 return; 949 else if (r) { 950 DMWARN("unimplemented target endio return value: %d", r); 951 BUG(); 952 } 953 } 954 955 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && 956 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) 957 disable_write_same(md); 958 959 free_tio(md, tio); 960 dec_pending(io, error); 961 } 962 963 /* 964 * Partial completion handling for request-based dm 965 */ 966 static void end_clone_bio(struct bio *clone, int error) 967 { 968 struct dm_rq_clone_bio_info *info = 969 container_of(clone, struct dm_rq_clone_bio_info, clone); 970 struct dm_rq_target_io *tio = info->tio; 971 struct bio *bio = info->orig; 972 unsigned int nr_bytes = info->orig->bi_iter.bi_size; 973 974 bio_put(clone); 975 976 if (tio->error) 977 /* 978 * An error has already been detected on the request. 979 * Once error occurred, just let clone->end_io() handle 980 * the remainder. 981 */ 982 return; 983 else if (error) { 984 /* 985 * Don't notice the error to the upper layer yet. 986 * The error handling decision is made by the target driver, 987 * when the request is completed. 988 */ 989 tio->error = error; 990 return; 991 } 992 993 /* 994 * I/O for the bio successfully completed. 995 * Notice the data completion to the upper layer. 996 */ 997 998 /* 999 * bios are processed from the head of the list. 1000 * So the completing bio should always be rq->bio. 1001 * If it's not, something wrong is happening. 1002 */ 1003 if (tio->orig->bio != bio) 1004 DMERR("bio completion is going in the middle of the request"); 1005 1006 /* 1007 * Update the original request. 1008 * Do not use blk_end_request() here, because it may complete 1009 * the original request before the clone, and break the ordering. 1010 */ 1011 blk_update_request(tio->orig, 0, nr_bytes); 1012 } 1013 1014 /* 1015 * Don't touch any member of the md after calling this function because 1016 * the md may be freed in dm_put() at the end of this function. 1017 * Or do dm_get() before calling this function and dm_put() later. 1018 */ 1019 static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 1020 { 1021 atomic_dec(&md->pending[rw]); 1022 1023 /* nudge anyone waiting on suspend queue */ 1024 if (!md_in_flight(md)) 1025 wake_up(&md->wait); 1026 1027 /* 1028 * Run this off this callpath, as drivers could invoke end_io while 1029 * inside their request_fn (and holding the queue lock). 
Calling 1030 * back into ->request_fn() could deadlock attempting to grab the 1031 * queue lock again. 1032 */ 1033 if (run_queue) 1034 blk_run_queue_async(md->queue); 1035 1036 /* 1037 * dm_put() must be at the end of this function. See the comment above 1038 */ 1039 dm_put(md); 1040 } 1041 1042 static void free_rq_clone(struct request *clone) 1043 { 1044 struct dm_rq_target_io *tio = clone->end_io_data; 1045 1046 blk_rq_unprep_clone(clone); 1047 free_clone_request(tio->md, clone); 1048 free_rq_tio(tio); 1049 } 1050 1051 /* 1052 * Complete the clone and the original request. 1053 * Must be called without clone's queue lock held, 1054 * see end_clone_request() for more details. 1055 */ 1056 static void dm_end_request(struct request *clone, int error) 1057 { 1058 int rw = rq_data_dir(clone); 1059 struct dm_rq_target_io *tio = clone->end_io_data; 1060 struct mapped_device *md = tio->md; 1061 struct request *rq = tio->orig; 1062 1063 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 1064 rq->errors = clone->errors; 1065 rq->resid_len = clone->resid_len; 1066 1067 if (rq->sense) 1068 /* 1069 * We are using the sense buffer of the original 1070 * request. 1071 * So setting the length of the sense data is enough. 1072 */ 1073 rq->sense_len = clone->sense_len; 1074 } 1075 1076 free_rq_clone(clone); 1077 blk_end_request_all(rq, error); 1078 rq_completed(md, rw, true); 1079 } 1080 1081 static void dm_unprep_request(struct request *rq) 1082 { 1083 struct dm_rq_target_io *tio = rq->special; 1084 struct request *clone = tio->clone; 1085 1086 rq->special = NULL; 1087 rq->cmd_flags &= ~REQ_DONTPREP; 1088 1089 free_rq_clone(clone); 1090 } 1091 1092 /* 1093 * Requeue the original request of a clone. 1094 */ 1095 static void dm_requeue_unmapped_original_request(struct mapped_device *md, 1096 struct request *rq) 1097 { 1098 int rw = rq_data_dir(rq); 1099 struct request_queue *q = rq->q; 1100 unsigned long flags; 1101 1102 dm_unprep_request(rq); 1103 1104 spin_lock_irqsave(q->queue_lock, flags); 1105 blk_requeue_request(q, rq); 1106 spin_unlock_irqrestore(q->queue_lock, flags); 1107 1108 rq_completed(md, rw, false); 1109 } 1110 1111 static void dm_requeue_unmapped_request(struct request *clone) 1112 { 1113 struct dm_rq_target_io *tio = clone->end_io_data; 1114 1115 dm_requeue_unmapped_original_request(tio->md, tio->orig); 1116 } 1117 1118 static void __stop_queue(struct request_queue *q) 1119 { 1120 blk_stop_queue(q); 1121 } 1122 1123 static void stop_queue(struct request_queue *q) 1124 { 1125 unsigned long flags; 1126 1127 spin_lock_irqsave(q->queue_lock, flags); 1128 __stop_queue(q); 1129 spin_unlock_irqrestore(q->queue_lock, flags); 1130 } 1131 1132 static void __start_queue(struct request_queue *q) 1133 { 1134 if (blk_queue_stopped(q)) 1135 blk_start_queue(q); 1136 } 1137 1138 static void start_queue(struct request_queue *q) 1139 { 1140 unsigned long flags; 1141 1142 spin_lock_irqsave(q->queue_lock, flags); 1143 __start_queue(q); 1144 spin_unlock_irqrestore(q->queue_lock, flags); 1145 } 1146 1147 static void dm_done(struct request *clone, int error, bool mapped) 1148 { 1149 int r = error; 1150 struct dm_rq_target_io *tio = clone->end_io_data; 1151 dm_request_endio_fn rq_end_io = NULL; 1152 1153 if (tio->ti) { 1154 rq_end_io = tio->ti->type->rq_end_io; 1155 1156 if (mapped && rq_end_io) 1157 r = rq_end_io(tio->ti, clone, error, &tio->info); 1158 } 1159 1160 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && 1161 !clone->q->limits.max_write_same_sectors)) 1162 disable_write_same(tio->md); 1163 
1164 if (r <= 0) 1165 /* The target wants to complete the I/O */ 1166 dm_end_request(clone, r); 1167 else if (r == DM_ENDIO_INCOMPLETE) 1168 /* The target will handle the I/O */ 1169 return; 1170 else if (r == DM_ENDIO_REQUEUE) 1171 /* The target wants to requeue the I/O */ 1172 dm_requeue_unmapped_request(clone); 1173 else { 1174 DMWARN("unimplemented target endio return value: %d", r); 1175 BUG(); 1176 } 1177 } 1178 1179 /* 1180 * Request completion handler for request-based dm 1181 */ 1182 static void dm_softirq_done(struct request *rq) 1183 { 1184 bool mapped = true; 1185 struct dm_rq_target_io *tio = rq->special; 1186 struct request *clone = tio->clone; 1187 1188 if (rq->cmd_flags & REQ_FAILED) 1189 mapped = false; 1190 1191 dm_done(clone, tio->error, mapped); 1192 } 1193 1194 /* 1195 * Complete the clone and the original request with the error status 1196 * through softirq context. 1197 */ 1198 static void dm_complete_request(struct request *rq, int error) 1199 { 1200 struct dm_rq_target_io *tio = rq->special; 1201 1202 tio->error = error; 1203 blk_complete_request(rq); 1204 } 1205 1206 /* 1207 * Complete the not-mapped clone and the original request with the error status 1208 * through softirq context. 1209 * Target's rq_end_io() function isn't called. 1210 * This may be used when the target's map_rq() function fails. 1211 */ 1212 static void dm_kill_unmapped_request(struct request *rq, int error) 1213 { 1214 rq->cmd_flags |= REQ_FAILED; 1215 dm_complete_request(rq, error); 1216 } 1217 1218 /* 1219 * Called with the clone's queue lock held 1220 */ 1221 static void end_clone_request(struct request *clone, int error) 1222 { 1223 struct dm_rq_target_io *tio = clone->end_io_data; 1224 1225 /* 1226 * For just cleaning up the information of the queue in which 1227 * the clone was dispatched. 1228 * The clone is *NOT* freed actually here because it is alloced from 1229 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags. 1230 */ 1231 __blk_put_request(clone->q, clone); 1232 1233 /* 1234 * Actual request completion is done in a softirq context which doesn't 1235 * hold the clone's queue lock. Otherwise, deadlock could occur because: 1236 * - another request may be submitted by the upper level driver 1237 * of the stacking during the completion 1238 * - the submission which requires queue lock may be done 1239 * against this clone's queue 1240 */ 1241 dm_complete_request(tio->orig, error); 1242 } 1243 1244 /* 1245 * Return maximum size of I/O possible at the supplied sector up to the current 1246 * target boundary. 1247 */ 1248 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 1249 { 1250 sector_t target_offset = dm_target_offset(ti, sector); 1251 1252 return ti->len - target_offset; 1253 } 1254 1255 static sector_t max_io_len(sector_t sector, struct dm_target *ti) 1256 { 1257 sector_t len = max_io_len_target_boundary(sector, ti); 1258 sector_t offset, max_len; 1259 1260 /* 1261 * Does the target need to split even further? 
1262 */ 1263 if (ti->max_io_len) { 1264 offset = dm_target_offset(ti, sector); 1265 if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1266 max_len = sector_div(offset, ti->max_io_len); 1267 else 1268 max_len = offset & (ti->max_io_len - 1); 1269 max_len = ti->max_io_len - max_len; 1270 1271 if (len > max_len) 1272 len = max_len; 1273 } 1274 1275 return len; 1276 } 1277 1278 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1279 { 1280 if (len > UINT_MAX) { 1281 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1282 (unsigned long long)len, UINT_MAX); 1283 ti->error = "Maximum size of target IO is too large"; 1284 return -EINVAL; 1285 } 1286 1287 ti->max_io_len = (uint32_t) len; 1288 1289 return 0; 1290 } 1291 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1292 1293 /* 1294 * A target may call dm_accept_partial_bio only from the map routine. It is 1295 * allowed for all bio types except REQ_FLUSH. 1296 * 1297 * dm_accept_partial_bio informs the dm that the target only wants to process 1298 * additional n_sectors sectors of the bio and the rest of the data should be 1299 * sent in a next bio. 1300 * 1301 * A diagram that explains the arithmetics: 1302 * +--------------------+---------------+-------+ 1303 * | 1 | 2 | 3 | 1304 * +--------------------+---------------+-------+ 1305 * 1306 * <-------------- *tio->len_ptr ---------------> 1307 * <------- bi_size -------> 1308 * <-- n_sectors --> 1309 * 1310 * Region 1 was already iterated over with bio_advance or similar function. 1311 * (it may be empty if the target doesn't use bio_advance) 1312 * Region 2 is the remaining bio size that the target wants to process. 1313 * (it may be empty if region 1 is non-empty, although there is no reason 1314 * to make it empty) 1315 * The target requires that region 3 is to be sent in the next bio. 1316 * 1317 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1318 * the partially processed part (the sum of regions 1+2) must be the same for all 1319 * copies of the bio. 1320 */ 1321 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1322 { 1323 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1324 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1325 BUG_ON(bio->bi_rw & REQ_FLUSH); 1326 BUG_ON(bi_size > *tio->len_ptr); 1327 BUG_ON(n_sectors > bi_size); 1328 *tio->len_ptr -= bi_size - n_sectors; 1329 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1330 } 1331 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1332 1333 static void __map_bio(struct dm_target_io *tio) 1334 { 1335 int r; 1336 sector_t sector; 1337 struct mapped_device *md; 1338 struct bio *clone = &tio->clone; 1339 struct dm_target *ti = tio->ti; 1340 1341 clone->bi_end_io = clone_endio; 1342 1343 /* 1344 * Map the clone. If r == 0 we don't need to do 1345 * anything, the target has assumed ownership of 1346 * this io. 
1347 */ 1348 atomic_inc(&tio->io->io_count); 1349 sector = clone->bi_iter.bi_sector; 1350 r = ti->type->map(ti, clone); 1351 if (r == DM_MAPIO_REMAPPED) { 1352 /* the bio has been remapped so dispatch it */ 1353 1354 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 1355 tio->io->bio->bi_bdev->bd_dev, sector); 1356 1357 generic_make_request(clone); 1358 } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 1359 /* error the io and bail out, or requeue it if needed */ 1360 md = tio->io->md; 1361 dec_pending(tio->io, r); 1362 free_tio(md, tio); 1363 } else if (r) { 1364 DMWARN("unimplemented target map return value: %d", r); 1365 BUG(); 1366 } 1367 } 1368 1369 struct clone_info { 1370 struct mapped_device *md; 1371 struct dm_table *map; 1372 struct bio *bio; 1373 struct dm_io *io; 1374 sector_t sector; 1375 unsigned sector_count; 1376 }; 1377 1378 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1379 { 1380 bio->bi_iter.bi_sector = sector; 1381 bio->bi_iter.bi_size = to_bytes(len); 1382 } 1383 1384 /* 1385 * Creates a bio that consists of range of complete bvecs. 1386 */ 1387 static void clone_bio(struct dm_target_io *tio, struct bio *bio, 1388 sector_t sector, unsigned len) 1389 { 1390 struct bio *clone = &tio->clone; 1391 1392 __bio_clone_fast(clone, bio); 1393 1394 if (bio_integrity(bio)) 1395 bio_integrity_clone(clone, bio, GFP_NOIO); 1396 1397 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1398 clone->bi_iter.bi_size = to_bytes(len); 1399 1400 if (bio_integrity(bio)) 1401 bio_integrity_trim(clone, 0, len); 1402 } 1403 1404 static struct dm_target_io *alloc_tio(struct clone_info *ci, 1405 struct dm_target *ti, 1406 unsigned target_bio_nr) 1407 { 1408 struct dm_target_io *tio; 1409 struct bio *clone; 1410 1411 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1412 tio = container_of(clone, struct dm_target_io, clone); 1413 1414 tio->io = ci->io; 1415 tio->ti = ti; 1416 tio->target_bio_nr = target_bio_nr; 1417 1418 return tio; 1419 } 1420 1421 static void __clone_and_map_simple_bio(struct clone_info *ci, 1422 struct dm_target *ti, 1423 unsigned target_bio_nr, unsigned *len) 1424 { 1425 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1426 struct bio *clone = &tio->clone; 1427 1428 tio->len_ptr = len; 1429 1430 __bio_clone_fast(clone, ci->bio); 1431 if (len) 1432 bio_setup_sector(clone, ci->sector, *len); 1433 1434 __map_bio(tio); 1435 } 1436 1437 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1438 unsigned num_bios, unsigned *len) 1439 { 1440 unsigned target_bio_nr; 1441 1442 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 1443 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 1444 } 1445 1446 static int __send_empty_flush(struct clone_info *ci) 1447 { 1448 unsigned target_nr = 0; 1449 struct dm_target *ti; 1450 1451 BUG_ON(bio_has_data(ci->bio)); 1452 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1453 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1454 1455 return 0; 1456 } 1457 1458 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1459 sector_t sector, unsigned *len) 1460 { 1461 struct bio *bio = ci->bio; 1462 struct dm_target_io *tio; 1463 unsigned target_bio_nr; 1464 unsigned num_target_bios = 1; 1465 1466 /* 1467 * Does the target want to receive duplicate copies of the bio? 
1468 */ 1469 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1470 num_target_bios = ti->num_write_bios(ti, bio); 1471 1472 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1473 tio = alloc_tio(ci, ti, target_bio_nr); 1474 tio->len_ptr = len; 1475 clone_bio(tio, bio, sector, *len); 1476 __map_bio(tio); 1477 } 1478 } 1479 1480 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1481 1482 static unsigned get_num_discard_bios(struct dm_target *ti) 1483 { 1484 return ti->num_discard_bios; 1485 } 1486 1487 static unsigned get_num_write_same_bios(struct dm_target *ti) 1488 { 1489 return ti->num_write_same_bios; 1490 } 1491 1492 typedef bool (*is_split_required_fn)(struct dm_target *ti); 1493 1494 static bool is_split_required_for_discard(struct dm_target *ti) 1495 { 1496 return ti->split_discard_bios; 1497 } 1498 1499 static int __send_changing_extent_only(struct clone_info *ci, 1500 get_num_bios_fn get_num_bios, 1501 is_split_required_fn is_split_required) 1502 { 1503 struct dm_target *ti; 1504 unsigned len; 1505 unsigned num_bios; 1506 1507 do { 1508 ti = dm_table_find_target(ci->map, ci->sector); 1509 if (!dm_target_is_valid(ti)) 1510 return -EIO; 1511 1512 /* 1513 * Even though the device advertised support for this type of 1514 * request, that does not mean every target supports it, and 1515 * reconfiguration might also have changed that since the 1516 * check was performed. 1517 */ 1518 num_bios = get_num_bios ? get_num_bios(ti) : 0; 1519 if (!num_bios) 1520 return -EOPNOTSUPP; 1521 1522 if (is_split_required && !is_split_required(ti)) 1523 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1524 else 1525 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 1526 1527 __send_duplicate_bios(ci, ti, num_bios, &len); 1528 1529 ci->sector += len; 1530 } while (ci->sector_count -= len); 1531 1532 return 0; 1533 } 1534 1535 static int __send_discard(struct clone_info *ci) 1536 { 1537 return __send_changing_extent_only(ci, get_num_discard_bios, 1538 is_split_required_for_discard); 1539 } 1540 1541 static int __send_write_same(struct clone_info *ci) 1542 { 1543 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 1544 } 1545 1546 /* 1547 * Select the correct strategy for processing a non-flush bio. 1548 */ 1549 static int __split_and_process_non_flush(struct clone_info *ci) 1550 { 1551 struct bio *bio = ci->bio; 1552 struct dm_target *ti; 1553 unsigned len; 1554 1555 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1556 return __send_discard(ci); 1557 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1558 return __send_write_same(ci); 1559 1560 ti = dm_table_find_target(ci->map, ci->sector); 1561 if (!dm_target_is_valid(ti)) 1562 return -EIO; 1563 1564 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1565 1566 __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1567 1568 ci->sector += len; 1569 ci->sector_count -= len; 1570 1571 return 0; 1572 } 1573 1574 /* 1575 * Entry point to split a bio into clones and submit them to the targets. 
1576 */ 1577 static void __split_and_process_bio(struct mapped_device *md, 1578 struct dm_table *map, struct bio *bio) 1579 { 1580 struct clone_info ci; 1581 int error = 0; 1582 1583 if (unlikely(!map)) { 1584 bio_io_error(bio); 1585 return; 1586 } 1587 1588 ci.map = map; 1589 ci.md = md; 1590 ci.io = alloc_io(md); 1591 ci.io->error = 0; 1592 atomic_set(&ci.io->io_count, 1); 1593 ci.io->bio = bio; 1594 ci.io->md = md; 1595 spin_lock_init(&ci.io->endio_lock); 1596 ci.sector = bio->bi_iter.bi_sector; 1597 1598 start_io_acct(ci.io); 1599 1600 if (bio->bi_rw & REQ_FLUSH) { 1601 ci.bio = &ci.md->flush_bio; 1602 ci.sector_count = 0; 1603 error = __send_empty_flush(&ci); 1604 /* dec_pending submits any data associated with flush */ 1605 } else { 1606 ci.bio = bio; 1607 ci.sector_count = bio_sectors(bio); 1608 while (ci.sector_count && !error) 1609 error = __split_and_process_non_flush(&ci); 1610 } 1611 1612 /* drop the extra reference count */ 1613 dec_pending(ci.io, error); 1614 } 1615 /*----------------------------------------------------------------- 1616 * CRUD END 1617 *---------------------------------------------------------------*/ 1618 1619 static int dm_merge_bvec(struct request_queue *q, 1620 struct bvec_merge_data *bvm, 1621 struct bio_vec *biovec) 1622 { 1623 struct mapped_device *md = q->queuedata; 1624 struct dm_table *map = dm_get_live_table_fast(md); 1625 struct dm_target *ti; 1626 sector_t max_sectors; 1627 int max_size = 0; 1628 1629 if (unlikely(!map)) 1630 goto out; 1631 1632 ti = dm_table_find_target(map, bvm->bi_sector); 1633 if (!dm_target_is_valid(ti)) 1634 goto out; 1635 1636 /* 1637 * Find maximum amount of I/O that won't need splitting 1638 */ 1639 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1640 (sector_t) queue_max_sectors(q)); 1641 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1642 if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ 1643 max_size = 0; 1644 1645 /* 1646 * merge_bvec_fn() returns number of bytes 1647 * it can accept at this offset 1648 * max is precomputed maximal io size 1649 */ 1650 if (max_size && ti->type->merge) 1651 max_size = ti->type->merge(ti, bvm, biovec, max_size); 1652 /* 1653 * If the target doesn't support merge method and some of the devices 1654 * provided their merge_bvec method (we know this by looking for the 1655 * max_hw_sectors that dm_set_device_limits may set), then we can't 1656 * allow bios with multiple vector entries. So always set max_size 1657 * to 0, and the code below allows just one page. 1658 */ 1659 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 1660 max_size = 0; 1661 1662 out: 1663 dm_put_live_table_fast(md); 1664 /* 1665 * Always allow an entire first page 1666 */ 1667 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1668 max_size = biovec->bv_len; 1669 1670 return max_size; 1671 } 1672 1673 /* 1674 * The request function that just remaps the bio built up by 1675 * dm_merge_bvec. 
1676 */ 1677 static void _dm_request(struct request_queue *q, struct bio *bio) 1678 { 1679 int rw = bio_data_dir(bio); 1680 struct mapped_device *md = q->queuedata; 1681 int srcu_idx; 1682 struct dm_table *map; 1683 1684 map = dm_get_live_table(md, &srcu_idx); 1685 1686 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 1687 1688 /* if we're suspended, we have to queue this io for later */ 1689 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1690 dm_put_live_table(md, srcu_idx); 1691 1692 if (bio_rw(bio) != READA) 1693 queue_io(md, bio); 1694 else 1695 bio_io_error(bio); 1696 return; 1697 } 1698 1699 __split_and_process_bio(md, map, bio); 1700 dm_put_live_table(md, srcu_idx); 1701 return; 1702 } 1703 1704 int dm_request_based(struct mapped_device *md) 1705 { 1706 return blk_queue_stackable(md->queue); 1707 } 1708 1709 static void dm_request(struct request_queue *q, struct bio *bio) 1710 { 1711 struct mapped_device *md = q->queuedata; 1712 1713 if (dm_request_based(md)) 1714 blk_queue_bio(q, bio); 1715 else 1716 _dm_request(q, bio); 1717 } 1718 1719 static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1720 { 1721 int r; 1722 1723 if (blk_queue_io_stat(clone->q)) 1724 clone->cmd_flags |= REQ_IO_STAT; 1725 1726 clone->start_time = jiffies; 1727 r = blk_insert_cloned_request(clone->q, clone); 1728 if (r) 1729 /* must complete clone in terms of original request */ 1730 dm_complete_request(rq, r); 1731 } 1732 1733 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1734 void *data) 1735 { 1736 struct dm_rq_target_io *tio = data; 1737 struct dm_rq_clone_bio_info *info = 1738 container_of(bio, struct dm_rq_clone_bio_info, clone); 1739 1740 info->orig = bio_orig; 1741 info->tio = tio; 1742 bio->bi_end_io = end_clone_bio; 1743 1744 return 0; 1745 } 1746 1747 static int setup_clone(struct request *clone, struct request *rq, 1748 struct dm_rq_target_io *tio, gfp_t gfp_mask) 1749 { 1750 int r; 1751 1752 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 1753 dm_rq_bio_constructor, tio); 1754 if (r) 1755 return r; 1756 1757 clone->cmd = rq->cmd; 1758 clone->cmd_len = rq->cmd_len; 1759 clone->sense = rq->sense; 1760 clone->end_io = end_clone_request; 1761 clone->end_io_data = tio; 1762 1763 tio->clone = clone; 1764 1765 return 0; 1766 } 1767 1768 static struct request *clone_rq(struct request *rq, struct mapped_device *md, 1769 struct dm_rq_target_io *tio, gfp_t gfp_mask) 1770 { 1771 struct request *clone = alloc_clone_request(md, gfp_mask); 1772 1773 if (!clone) 1774 return NULL; 1775 1776 blk_rq_init(NULL, clone); 1777 if (setup_clone(clone, rq, tio, gfp_mask)) { 1778 /* -ENOMEM */ 1779 free_clone_request(md, clone); 1780 return NULL; 1781 } 1782 1783 return clone; 1784 } 1785 1786 static void map_tio_request(struct kthread_work *work); 1787 1788 static struct dm_rq_target_io *prep_tio(struct request *rq, 1789 struct mapped_device *md, gfp_t gfp_mask) 1790 { 1791 struct dm_rq_target_io *tio; 1792 1793 tio = alloc_rq_tio(md, gfp_mask); 1794 if (!tio) 1795 return NULL; 1796 1797 tio->md = md; 1798 tio->ti = NULL; 1799 tio->clone = NULL; 1800 tio->orig = rq; 1801 tio->error = 0; 1802 memset(&tio->info, 0, sizeof(tio->info)); 1803 init_kthread_work(&tio->work, map_tio_request); 1804 1805 if (!clone_rq(rq, md, tio, gfp_mask)) { 1806 free_rq_tio(tio); 1807 return NULL; 1808 } 1809 1810 return tio; 1811 } 1812 1813 /* 1814 * Called with the queue lock held. 
1815 */ 1816 static int dm_prep_fn(struct request_queue *q, struct request *rq) 1817 { 1818 struct mapped_device *md = q->queuedata; 1819 struct dm_rq_target_io *tio; 1820 1821 if (unlikely(rq->special)) { 1822 DMWARN("Already has something in rq->special."); 1823 return BLKPREP_KILL; 1824 } 1825 1826 tio = prep_tio(rq, md, GFP_ATOMIC); 1827 if (!tio) 1828 return BLKPREP_DEFER; 1829 1830 rq->special = tio; 1831 rq->cmd_flags |= REQ_DONTPREP; 1832 1833 return BLKPREP_OK; 1834 } 1835 1836 /* 1837 * Returns: 1838 * 0 : the request has been processed (not requeued) 1839 * !0 : the request has been requeued 1840 */ 1841 static int map_request(struct dm_target *ti, struct request *rq, 1842 struct mapped_device *md) 1843 { 1844 int r, requeued = 0; 1845 struct dm_rq_target_io *tio = rq->special; 1846 struct request *clone = tio->clone; 1847 1848 r = ti->type->map_rq(ti, clone, &tio->info); 1849 switch (r) { 1850 case DM_MAPIO_SUBMITTED: 1851 /* The target has taken the I/O to submit by itself later */ 1852 break; 1853 case DM_MAPIO_REMAPPED: 1854 /* The target has remapped the I/O so dispatch it */ 1855 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1856 blk_rq_pos(rq)); 1857 dm_dispatch_clone_request(clone, rq); 1858 break; 1859 case DM_MAPIO_REQUEUE: 1860 /* The target wants to requeue the I/O */ 1861 dm_requeue_unmapped_request(clone); 1862 requeued = 1; 1863 break; 1864 default: 1865 if (r > 0) { 1866 DMWARN("unimplemented target map return value: %d", r); 1867 BUG(); 1868 } 1869 1870 /* The target wants to complete the I/O */ 1871 dm_kill_unmapped_request(rq, r); 1872 break; 1873 } 1874 1875 return requeued; 1876 } 1877 1878 static void map_tio_request(struct kthread_work *work) 1879 { 1880 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 1881 1882 map_request(tio->ti, tio->orig, tio->md); 1883 } 1884 1885 static void dm_start_request(struct mapped_device *md, struct request *orig) 1886 { 1887 blk_start_request(orig); 1888 atomic_inc(&md->pending[rq_data_dir(orig)]); 1889 1890 /* 1891 * Hold the md reference here for the in-flight I/O. 1892 * We can't rely on the reference count by device opener, 1893 * because the device may be closed during the request completion 1894 * when all bios are completed. 1895 * See the comment in rq_completed() too. 1896 */ 1897 dm_get(md); 1898 } 1899 1900 /* 1901 * q->request_fn for request-based dm. 1902 * Called with the queue lock held. 1903 */ 1904 static void dm_request_fn(struct request_queue *q) 1905 { 1906 struct mapped_device *md = q->queuedata; 1907 int srcu_idx; 1908 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 1909 struct dm_target *ti; 1910 struct request *rq; 1911 struct dm_rq_target_io *tio; 1912 sector_t pos; 1913 1914 /* 1915 * For suspend, check blk_queue_stopped() and increment 1916 * ->pending within a single queue_lock not to increment the 1917 * number of in-flight I/Os after the queue is stopped in 1918 * dm_suspend(). 
1919 */ 1920 while (!blk_queue_stopped(q)) { 1921 rq = blk_peek_request(q); 1922 if (!rq) 1923 goto delay_and_out; 1924 1925 /* always use block 0 to find the target for flushes for now */ 1926 pos = 0; 1927 if (!(rq->cmd_flags & REQ_FLUSH)) 1928 pos = blk_rq_pos(rq); 1929 1930 ti = dm_table_find_target(map, pos); 1931 if (!dm_target_is_valid(ti)) { 1932 /* 1933 * Must perform setup, that rq_completed() requires, 1934 * before calling dm_kill_unmapped_request 1935 */ 1936 DMERR_LIMIT("request attempted access beyond the end of device"); 1937 dm_start_request(md, rq); 1938 dm_kill_unmapped_request(rq, -EIO); 1939 continue; 1940 } 1941 1942 if (ti->type->busy && ti->type->busy(ti)) 1943 goto delay_and_out; 1944 1945 dm_start_request(md, rq); 1946 1947 tio = rq->special; 1948 /* Establish tio->ti before queuing work (map_tio_request) */ 1949 tio->ti = ti; 1950 queue_kthread_work(&md->kworker, &tio->work); 1951 BUG_ON(!irqs_disabled()); 1952 } 1953 1954 goto out; 1955 1956 delay_and_out: 1957 blk_delay_queue(q, HZ / 10); 1958 out: 1959 dm_put_live_table(md, srcu_idx); 1960 } 1961 1962 int dm_underlying_device_busy(struct request_queue *q) 1963 { 1964 return blk_lld_busy(q); 1965 } 1966 EXPORT_SYMBOL_GPL(dm_underlying_device_busy); 1967 1968 static int dm_lld_busy(struct request_queue *q) 1969 { 1970 int r; 1971 struct mapped_device *md = q->queuedata; 1972 struct dm_table *map = dm_get_live_table_fast(md); 1973 1974 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1975 r = 1; 1976 else 1977 r = dm_table_any_busy_target(map); 1978 1979 dm_put_live_table_fast(md); 1980 1981 return r; 1982 } 1983 1984 static int dm_any_congested(void *congested_data, int bdi_bits) 1985 { 1986 int r = bdi_bits; 1987 struct mapped_device *md = congested_data; 1988 struct dm_table *map; 1989 1990 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1991 map = dm_get_live_table_fast(md); 1992 if (map) { 1993 /* 1994 * Request-based dm cares about only own queue for 1995 * the query about congestion status of request_queue 1996 */ 1997 if (dm_request_based(md)) 1998 r = md->queue->backing_dev_info.state & 1999 bdi_bits; 2000 else 2001 r = dm_table_any_congested(map, bdi_bits); 2002 } 2003 dm_put_live_table_fast(md); 2004 } 2005 2006 return r; 2007 } 2008 2009 /*----------------------------------------------------------------- 2010 * An IDR is used to keep track of allocated minor numbers. 2011 *---------------------------------------------------------------*/ 2012 static void free_minor(int minor) 2013 { 2014 spin_lock(&_minor_lock); 2015 idr_remove(&_minor_idr, minor); 2016 spin_unlock(&_minor_lock); 2017 } 2018 2019 /* 2020 * See if the device with a specific minor # is free. 2021 */ 2022 static int specific_minor(int minor) 2023 { 2024 int r; 2025 2026 if (minor >= (1 << MINORBITS)) 2027 return -EINVAL; 2028 2029 idr_preload(GFP_KERNEL); 2030 spin_lock(&_minor_lock); 2031 2032 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 2033 2034 spin_unlock(&_minor_lock); 2035 idr_preload_end(); 2036 if (r < 0) 2037 return r == -ENOSPC ? 
-EBUSY : r; 2038 return 0; 2039 } 2040 2041 static int next_free_minor(int *minor) 2042 { 2043 int r; 2044 2045 idr_preload(GFP_KERNEL); 2046 spin_lock(&_minor_lock); 2047 2048 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 2049 2050 spin_unlock(&_minor_lock); 2051 idr_preload_end(); 2052 if (r < 0) 2053 return r; 2054 *minor = r; 2055 return 0; 2056 } 2057 2058 static const struct block_device_operations dm_blk_dops; 2059 2060 static void dm_wq_work(struct work_struct *work); 2061 2062 static void dm_init_md_queue(struct mapped_device *md) 2063 { 2064 /* 2065 * Request-based dm devices cannot be stacked on top of bio-based dm 2066 * devices. The type of this dm device has not been decided yet. 2067 * The type is decided at the first table loading time. 2068 * To prevent problematic device stacking, clear the queue flag 2069 * for request stacking support until then. 2070 * 2071 * This queue is new, so no concurrency on the queue_flags. 2072 */ 2073 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 2074 2075 md->queue->queuedata = md; 2076 md->queue->backing_dev_info.congested_fn = dm_any_congested; 2077 md->queue->backing_dev_info.congested_data = md; 2078 blk_queue_make_request(md->queue, dm_request); 2079 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 2080 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 2081 } 2082 2083 /* 2084 * Allocate and initialise a blank device with a given minor. 2085 */ 2086 static struct mapped_device *alloc_dev(int minor) 2087 { 2088 int r; 2089 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 2090 void *old_md; 2091 2092 if (!md) { 2093 DMWARN("unable to allocate device, out of memory."); 2094 return NULL; 2095 } 2096 2097 if (!try_module_get(THIS_MODULE)) 2098 goto bad_module_get; 2099 2100 /* get a minor number for the dev */ 2101 if (minor == DM_ANY_MINOR) 2102 r = next_free_minor(&minor); 2103 else 2104 r = specific_minor(minor); 2105 if (r < 0) 2106 goto bad_minor; 2107 2108 r = init_srcu_struct(&md->io_barrier); 2109 if (r < 0) 2110 goto bad_io_barrier; 2111 2112 md->type = DM_TYPE_NONE; 2113 mutex_init(&md->suspend_lock); 2114 mutex_init(&md->type_lock); 2115 mutex_init(&md->table_devices_lock); 2116 spin_lock_init(&md->deferred_lock); 2117 atomic_set(&md->holders, 1); 2118 atomic_set(&md->open_count, 0); 2119 atomic_set(&md->event_nr, 0); 2120 atomic_set(&md->uevent_seq, 0); 2121 INIT_LIST_HEAD(&md->uevent_list); 2122 INIT_LIST_HEAD(&md->table_devices); 2123 spin_lock_init(&md->uevent_lock); 2124 2125 md->queue = blk_alloc_queue(GFP_KERNEL); 2126 if (!md->queue) 2127 goto bad_queue; 2128 2129 dm_init_md_queue(md); 2130 2131 md->disk = alloc_disk(1); 2132 if (!md->disk) 2133 goto bad_disk; 2134 2135 atomic_set(&md->pending[0], 0); 2136 atomic_set(&md->pending[1], 0); 2137 init_waitqueue_head(&md->wait); 2138 INIT_WORK(&md->work, dm_wq_work); 2139 init_waitqueue_head(&md->eventq); 2140 init_completion(&md->kobj_holder.completion); 2141 md->kworker_task = NULL; 2142 2143 md->disk->major = _major; 2144 md->disk->first_minor = minor; 2145 md->disk->fops = &dm_blk_dops; 2146 md->disk->queue = md->queue; 2147 md->disk->private_data = md; 2148 sprintf(md->disk->disk_name, "dm-%d", minor); 2149 add_disk(md->disk); 2150 format_dev_t(md->name, MKDEV(_major, minor)); 2151 2152 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2153 if (!md->wq) 2154 goto bad_thread; 2155 2156 md->bdev = bdget_disk(md->disk, 0); 2157 if (!md->bdev) 2158 goto bad_bdev; 2159 2160 bio_init(&md->flush_bio); 2161 
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);
	md->kworker_task = NULL;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	cleanup_srcu_struct(&md->io_barrier);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);

	if (md->kworker_task)
		kthread_stop(md->kworker_task);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->rq_pool)
		mempool_destroy(md->rq_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	cleanup_srcu_struct(&md->io_barrier);
	free_table_devices(&md->table_devices);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	dm_stats_cleanup(&md->stats);
	module_put(THIS_MODULE);
	kfree(md);
}

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->io_pool && md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
			/*
			 * Reload the bioset because front_pad may have
			 * changed when a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
		}
		/*
		 * There's no need to reload with request-based dm because
		 * the size of front_pad doesn't change.
		 * Note for the future: if the bioset is ever reloaded here,
		 * prepped requests in the queue may still refer to bios from
		 * the old bioset, so the queue would have to be walked to
		 * unprep them.
		 */
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->rq_pool = p->rq_pool;
	p->rq_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed; the table no longer needs any mempools */
	dm_table_free_md_mempools(t);
}
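/*
 * Note on __bind_mempools() above: ownership of the pools moves from the
 * table's dm_md_mempools into the md; the now-empty container is then
 * released with dm_table_free_md_mempools().
 */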
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

/*
 * Return 1 if the queue has a compulsory merge_bvec_fn function.
 *
 * If this function returns 0, then the device is either a non-dm
 * device without a merge_bvec_fn, or it is a dm device that is
 * able to split any bios it receives that are too big.
 */
int dm_queue_merge_is_compulsory(struct request_queue *q)
{
	struct mapped_device *dev_md;

	if (!q->merge_bvec_fn)
		return 0;

	if (q->make_request_fn == dm_request) {
		dev_md = q->queuedata;
		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
			return 0;
	}

	return 1;
}

static int dm_device_merge_is_compulsory(struct dm_target *ti,
					 struct dm_dev *dev, sector_t start,
					 sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return dm_queue_merge_is_compulsory(q);
}

/*
 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
 */
static int dm_table_merge_is_optional(struct dm_table *table)
{
	unsigned i = 0;
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
			return 0;
	}

	return 1;
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	int merge_is_optional;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * If the old table type wasn't request-based, the queue was not
	 * stopped during suspension, so stop it now to prevent I/O from
	 * being mapped before resume.
	 * This must be done before setting the queue restrictions, because
	 * request-based dm may start running as soon as they are set.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	merge_is_optional = dm_table_merge_is_optional(t);

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	else
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	if (old_map)
		dm_sync_table(md);

	return old_map;
}
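/*
 * __bind() publishes the new map with rcu_assign_pointer() and, if an old
 * map existed, calls dm_sync_table() so that no reader obtained via
 * dm_get_live_table{,_fast}() can still see it; only then is it safe for
 * the caller to destroy the returned table.
 */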
/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
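/*
 * md->type lifecycle: alloc_dev() starts every device as DM_TYPE_NONE,
 * the first table load fixes the type under md->type_lock, and
 * dm_setup_md_queue() below then upgrades the queue when that type turns
 * out to be request-based.
 */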
/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	/* Also initialize the request-based DM worker thread */
	init_kthread_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
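/*
 * Reference counting: alloc_dev() starts md->holders at 1, dm_get()/dm_put()
 * adjust it, and __dm_destroy() below waits (or merely warns, for the
 * immediate variant) until every holder has dropped its reference before
 * freeing the device.
 */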
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md, &srcu_idx);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (dm_request_based(md))
		flush_kthread_worker(&md->kworker);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);

	/*
	 * Rare, but there may still be I/O requests completing, for example.
	 * Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device
	 * after its state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path when all paths
	 * disappear: new I/O is then queued based on these limits until
	 * some paths reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
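/*
 * Typical table-replacement sequence as driven from the ioctl layer
 * (a sketch, not the literal dm-ioctl code):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */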
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 *
 * Caller must hold md->suspend_lock
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, int interruptible)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio, which is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		stop_queue(md->queue);
		flush_kthread_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, interruptible);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}
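/*
 * Flag summary for __dm_suspend() above: DMF_NOFLUSH_SUSPENDING is only set
 * for the duration of the call, DMF_FROZEN is set by lock_fs() and cleared
 * by unlock_fs(), and DMF_BLOCK_IO_FOR_SUSPEND stays set on success until
 * dm_queue_flush() runs during resume.
 */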
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_swap_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
	if (r)
		goto out_unlock;

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);

	r = 0;
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
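/*
 * Both dm_suspend() and dm_resume() above defer to an in-progress internal
 * suspend: they drop md->suspend_lock, sleep on DMF_SUSPENDED_INTERNALLY
 * with wait_on_bit(), and retry once the internal resume below clears the
 * bit and calls wake_up_bit().
 */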
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	if (dm_suspended_internally_md(md))
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nested suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);

	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	if (!dm_suspended_internally_md(md))
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
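/*
 * Note that dm_internal_suspend_fast() intentionally returns with
 * md->suspend_lock still held (even on its early-return path); the lock is
 * only released by the matching dm_internal_resume_fast(), so the two must
 * always be called as a pair.
 */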
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
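/*
 * Pool sizing below comes from the reserved_bio_based_ios /
 * reserved_rq_based_ios module parameters (via the dm_get_reserved_*_ios()
 * helpers), while front_pad differs by type: bio-based pools reserve room
 * for the per-bio data requested by the target, request-based pools only
 * for struct dm_rq_clone_bio_info.
 */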
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep;
	unsigned int pool_size;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	if (type == DM_TYPE_BIO_BASED) {
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
	} else if (type == DM_TYPE_REQUEST_BASED) {
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_bio_data_size is not used. See __bind_mempools(). */
		WARN_ON(per_bio_data_size != 0);
	} else
		goto out;

	pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
	if (!pools->io_pool)
		goto out;

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->rq_pool)
		mempool_destroy(pools->rq_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
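/*
 * Example usage of the parameters above (assuming the usual dm-mod module
 * name; exact paths may differ by distribution):
 *
 *	boot-time:  dm_mod.reserved_bio_based_ios=32
 *	run-time:   echo 32 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 */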