1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm.h" 9 #include "dm-uevent.h" 10 11 #include <linux/init.h> 12 #include <linux/module.h> 13 #include <linux/mutex.h> 14 #include <linux/moduleparam.h> 15 #include <linux/blkpg.h> 16 #include <linux/bio.h> 17 #include <linux/mempool.h> 18 #include <linux/slab.h> 19 #include <linux/idr.h> 20 #include <linux/hdreg.h> 21 #include <linux/delay.h> 22 #include <linux/wait.h> 23 #include <linux/kthread.h> 24 #include <linux/ktime.h> 25 #include <linux/elevator.h> /* for rq_end_sector() */ 26 #include <linux/blk-mq.h> 27 #include <linux/pr.h> 28 29 #include <trace/events/block.h> 30 31 #define DM_MSG_PREFIX "core" 32 33 #ifdef CONFIG_PRINTK 34 /* 35 * ratelimit state to be used in DMXXX_LIMIT(). 36 */ 37 DEFINE_RATELIMIT_STATE(dm_ratelimit_state, 38 DEFAULT_RATELIMIT_INTERVAL, 39 DEFAULT_RATELIMIT_BURST); 40 EXPORT_SYMBOL(dm_ratelimit_state); 41 #endif 42 43 /* 44 * Cookies are numeric values sent with CHANGE and REMOVE 45 * uevents while resuming, removing or renaming the device. 46 */ 47 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 48 #define DM_COOKIE_LENGTH 24 49 50 static const char *_name = DM_NAME; 51 52 static unsigned int major = 0; 53 static unsigned int _major = 0; 54 55 static DEFINE_IDR(_minor_idr); 56 57 static DEFINE_SPINLOCK(_minor_lock); 58 59 static void do_deferred_remove(struct work_struct *w); 60 61 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 62 63 static struct workqueue_struct *deferred_remove_workqueue; 64 65 /* 66 * For bio-based dm. 67 * One of these is allocated per bio. 68 */ 69 struct dm_io { 70 struct mapped_device *md; 71 int error; 72 atomic_t io_count; 73 struct bio *bio; 74 unsigned long start_time; 75 spinlock_t endio_lock; 76 struct dm_stats_aux stats_aux; 77 }; 78 79 /* 80 * For request-based dm. 81 * One of these is allocated per request. 82 */ 83 struct dm_rq_target_io { 84 struct mapped_device *md; 85 struct dm_target *ti; 86 struct request *orig, *clone; 87 struct kthread_work work; 88 int error; 89 union map_info info; 90 struct dm_stats_aux stats_aux; 91 unsigned long duration_jiffies; 92 unsigned n_sectors; 93 }; 94 95 /* 96 * For request-based dm - the bio clones we allocate are embedded in these 97 * structs. 98 * 99 * We allocate these with bio_alloc_bioset, using the front_pad parameter when 100 * the bioset is created - this means the bio has to come at the end of the 101 * struct. 102 */ 103 struct dm_rq_clone_bio_info { 104 struct bio *orig; 105 struct dm_rq_target_io *tio; 106 struct bio clone; 107 }; 108 109 #define MINOR_ALLOCED ((void *)-1) 110 111 /* 112 * Bits for the md->flags field. 113 */ 114 #define DMF_BLOCK_IO_FOR_SUSPEND 0 115 #define DMF_SUSPENDED 1 116 #define DMF_FROZEN 2 117 #define DMF_FREEING 3 118 #define DMF_DELETING 4 119 #define DMF_NOFLUSH_SUSPENDING 5 120 #define DMF_DEFERRED_REMOVE 6 121 #define DMF_SUSPENDED_INTERNALLY 7 122 123 /* 124 * Work processed by per-device workqueue. 125 */ 126 struct mapped_device { 127 struct srcu_struct io_barrier; 128 struct mutex suspend_lock; 129 130 /* 131 * The current mapping (struct dm_table *). 132 * Use dm_get_live_table{_fast} or take suspend_lock for 133 * dereference. 
134 */ 135 void __rcu *map; 136 137 struct list_head table_devices; 138 struct mutex table_devices_lock; 139 140 unsigned long flags; 141 142 struct request_queue *queue; 143 int numa_node_id; 144 145 unsigned type; 146 /* Protect queue and type against concurrent access. */ 147 struct mutex type_lock; 148 149 atomic_t holders; 150 atomic_t open_count; 151 152 struct dm_target *immutable_target; 153 struct target_type *immutable_target_type; 154 155 struct gendisk *disk; 156 char name[16]; 157 158 void *interface_ptr; 159 160 /* 161 * A list of ios that arrived while we were suspended. 162 */ 163 atomic_t pending[2]; 164 wait_queue_head_t wait; 165 struct work_struct work; 166 spinlock_t deferred_lock; 167 struct bio_list deferred; 168 169 /* 170 * Event handling. 171 */ 172 wait_queue_head_t eventq; 173 atomic_t event_nr; 174 atomic_t uevent_seq; 175 struct list_head uevent_list; 176 spinlock_t uevent_lock; /* Protect access to uevent_list */ 177 178 /* the number of internal suspends */ 179 unsigned internal_suspend_count; 180 181 /* 182 * Processing queue (flush) 183 */ 184 struct workqueue_struct *wq; 185 186 /* 187 * io objects are allocated from here. 188 */ 189 mempool_t *io_pool; 190 mempool_t *rq_pool; 191 192 struct bio_set *bs; 193 194 /* 195 * freeze/thaw support require holding onto a super block 196 */ 197 struct super_block *frozen_sb; 198 199 /* forced geometry settings */ 200 struct hd_geometry geometry; 201 202 struct block_device *bdev; 203 204 /* kobject and completion */ 205 struct dm_kobject_holder kobj_holder; 206 207 /* zero-length flush that will be cloned and submitted to targets */ 208 struct bio flush_bio; 209 210 struct dm_stats stats; 211 212 struct kthread_worker kworker; 213 struct task_struct *kworker_task; 214 215 /* for request-based merge heuristic in dm_request_fn() */ 216 unsigned seq_rq_merge_deadline_usecs; 217 int last_rq_rw; 218 sector_t last_rq_pos; 219 ktime_t last_rq_start_time; 220 221 /* for blk-mq request-based DM support */ 222 struct blk_mq_tag_set *tag_set; 223 bool use_blk_mq:1; 224 bool init_tio_pdu:1; 225 }; 226 227 #ifdef CONFIG_DM_MQ_DEFAULT 228 static bool use_blk_mq = true; 229 #else 230 static bool use_blk_mq = false; 231 #endif 232 233 #define DM_MQ_NR_HW_QUEUES 1 234 #define DM_MQ_QUEUE_DEPTH 2048 235 #define DM_NUMA_NODE NUMA_NO_NODE 236 237 static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES; 238 static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH; 239 static int dm_numa_node = DM_NUMA_NODE; 240 241 bool dm_use_blk_mq(struct mapped_device *md) 242 { 243 return md->use_blk_mq; 244 } 245 EXPORT_SYMBOL_GPL(dm_use_blk_mq); 246 247 /* 248 * For mempools pre-allocation at the table loading time. 249 */ 250 struct dm_md_mempools { 251 mempool_t *io_pool; 252 mempool_t *rq_pool; 253 struct bio_set *bs; 254 }; 255 256 struct table_device { 257 struct list_head list; 258 atomic_t count; 259 struct dm_dev dm_dev; 260 }; 261 262 #define RESERVED_BIO_BASED_IOS 16 263 #define RESERVED_REQUEST_BASED_IOS 256 264 #define RESERVED_MAX_IOS 1024 265 static struct kmem_cache *_io_cache; 266 static struct kmem_cache *_rq_tio_cache; 267 static struct kmem_cache *_rq_cache; 268 269 /* 270 * Bio-based DM's mempools' reserved IOs set by the user. 271 */ 272 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 273 274 /* 275 * Request-based DM's mempools' reserved IOs set by the user. 
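 * A value of 0 selects the built-in default and anything above
 * RESERVED_MAX_IOS is capped; see __dm_get_module_param() below.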
276 */ 277 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; 278 279 static int __dm_get_module_param_int(int *module_param, int min, int max) 280 { 281 int param = ACCESS_ONCE(*module_param); 282 int modified_param = 0; 283 bool modified = true; 284 285 if (param < min) 286 modified_param = min; 287 else if (param > max) 288 modified_param = max; 289 else 290 modified = false; 291 292 if (modified) { 293 (void)cmpxchg(module_param, param, modified_param); 294 param = modified_param; 295 } 296 297 return param; 298 } 299 300 static unsigned __dm_get_module_param(unsigned *module_param, 301 unsigned def, unsigned max) 302 { 303 unsigned param = ACCESS_ONCE(*module_param); 304 unsigned modified_param = 0; 305 306 if (!param) 307 modified_param = def; 308 else if (param > max) 309 modified_param = max; 310 311 if (modified_param) { 312 (void)cmpxchg(module_param, param, modified_param); 313 param = modified_param; 314 } 315 316 return param; 317 } 318 319 unsigned dm_get_reserved_bio_based_ios(void) 320 { 321 return __dm_get_module_param(&reserved_bio_based_ios, 322 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); 323 } 324 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 325 326 unsigned dm_get_reserved_rq_based_ios(void) 327 { 328 return __dm_get_module_param(&reserved_rq_based_ios, 329 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); 330 } 331 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); 332 333 static unsigned dm_get_blk_mq_nr_hw_queues(void) 334 { 335 return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32); 336 } 337 338 static unsigned dm_get_blk_mq_queue_depth(void) 339 { 340 return __dm_get_module_param(&dm_mq_queue_depth, 341 DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH); 342 } 343 344 static unsigned dm_get_numa_node(void) 345 { 346 return __dm_get_module_param_int(&dm_numa_node, 347 DM_NUMA_NODE, num_online_nodes() - 1); 348 } 349 350 static int __init local_init(void) 351 { 352 int r = -ENOMEM; 353 354 /* allocate a slab for the dm_ios */ 355 _io_cache = KMEM_CACHE(dm_io, 0); 356 if (!_io_cache) 357 return r; 358 359 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 360 if (!_rq_tio_cache) 361 goto out_free_io_cache; 362 363 _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), 364 __alignof__(struct request), 0, NULL); 365 if (!_rq_cache) 366 goto out_free_rq_tio_cache; 367 368 r = dm_uevent_init(); 369 if (r) 370 goto out_free_rq_cache; 371 372 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 373 if (!deferred_remove_workqueue) { 374 r = -ENOMEM; 375 goto out_uevent_exit; 376 } 377 378 _major = major; 379 r = register_blkdev(_major, _name); 380 if (r < 0) 381 goto out_free_workqueue; 382 383 if (!_major) 384 _major = r; 385 386 return 0; 387 388 out_free_workqueue: 389 destroy_workqueue(deferred_remove_workqueue); 390 out_uevent_exit: 391 dm_uevent_exit(); 392 out_free_rq_cache: 393 kmem_cache_destroy(_rq_cache); 394 out_free_rq_tio_cache: 395 kmem_cache_destroy(_rq_tio_cache); 396 out_free_io_cache: 397 kmem_cache_destroy(_io_cache); 398 399 return r; 400 } 401 402 static void local_exit(void) 403 { 404 flush_scheduled_work(); 405 destroy_workqueue(deferred_remove_workqueue); 406 407 kmem_cache_destroy(_rq_cache); 408 kmem_cache_destroy(_rq_tio_cache); 409 kmem_cache_destroy(_io_cache); 410 unregister_blkdev(_major, _name); 411 dm_uevent_exit(); 412 413 _major = 0; 414 415 DMINFO("cleaned up"); 416 } 417 418 static int (*_inits[])(void) __initdata = { 419 local_init, 420 dm_target_init, 421 dm_linear_init, 422 dm_stripe_init, 
423 dm_io_init, 424 dm_kcopyd_init, 425 dm_interface_init, 426 dm_statistics_init, 427 }; 428 429 static void (*_exits[])(void) = { 430 local_exit, 431 dm_target_exit, 432 dm_linear_exit, 433 dm_stripe_exit, 434 dm_io_exit, 435 dm_kcopyd_exit, 436 dm_interface_exit, 437 dm_statistics_exit, 438 }; 439 440 static int __init dm_init(void) 441 { 442 const int count = ARRAY_SIZE(_inits); 443 444 int r, i; 445 446 for (i = 0; i < count; i++) { 447 r = _inits[i](); 448 if (r) 449 goto bad; 450 } 451 452 return 0; 453 454 bad: 455 while (i--) 456 _exits[i](); 457 458 return r; 459 } 460 461 static void __exit dm_exit(void) 462 { 463 int i = ARRAY_SIZE(_exits); 464 465 while (i--) 466 _exits[i](); 467 468 /* 469 * Should be empty by this point. 470 */ 471 idr_destroy(&_minor_idr); 472 } 473 474 /* 475 * Block device functions 476 */ 477 int dm_deleting_md(struct mapped_device *md) 478 { 479 return test_bit(DMF_DELETING, &md->flags); 480 } 481 482 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 483 { 484 struct mapped_device *md; 485 486 spin_lock(&_minor_lock); 487 488 md = bdev->bd_disk->private_data; 489 if (!md) 490 goto out; 491 492 if (test_bit(DMF_FREEING, &md->flags) || 493 dm_deleting_md(md)) { 494 md = NULL; 495 goto out; 496 } 497 498 dm_get(md); 499 atomic_inc(&md->open_count); 500 out: 501 spin_unlock(&_minor_lock); 502 503 return md ? 0 : -ENXIO; 504 } 505 506 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 507 { 508 struct mapped_device *md; 509 510 spin_lock(&_minor_lock); 511 512 md = disk->private_data; 513 if (WARN_ON(!md)) 514 goto out; 515 516 if (atomic_dec_and_test(&md->open_count) && 517 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 518 queue_work(deferred_remove_workqueue, &deferred_remove_work); 519 520 dm_put(md); 521 out: 522 spin_unlock(&_minor_lock); 523 } 524 525 int dm_open_count(struct mapped_device *md) 526 { 527 return atomic_read(&md->open_count); 528 } 529 530 /* 531 * Guarantees nothing is using the device before it's deleted. 
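 * Returns 0 and sets DMF_DELETING when the device is not open.  Returns
 * -EBUSY when it is still open (and, if mark_deferred is set, flags it
 * with DMF_DEFERRED_REMOVE so removal is retried once the last opener
 * closes it), or -EEXIST when only_deferred is set but no deferred
 * removal was requested.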
532 */ 533 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 534 { 535 int r = 0; 536 537 spin_lock(&_minor_lock); 538 539 if (dm_open_count(md)) { 540 r = -EBUSY; 541 if (mark_deferred) 542 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 543 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 544 r = -EEXIST; 545 else 546 set_bit(DMF_DELETING, &md->flags); 547 548 spin_unlock(&_minor_lock); 549 550 return r; 551 } 552 553 int dm_cancel_deferred_remove(struct mapped_device *md) 554 { 555 int r = 0; 556 557 spin_lock(&_minor_lock); 558 559 if (test_bit(DMF_DELETING, &md->flags)) 560 r = -EBUSY; 561 else 562 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 563 564 spin_unlock(&_minor_lock); 565 566 return r; 567 } 568 569 static void do_deferred_remove(struct work_struct *w) 570 { 571 dm_deferred_remove(); 572 } 573 574 sector_t dm_get_size(struct mapped_device *md) 575 { 576 return get_capacity(md->disk); 577 } 578 579 struct request_queue *dm_get_md_queue(struct mapped_device *md) 580 { 581 return md->queue; 582 } 583 584 struct dm_stats *dm_get_stats(struct mapped_device *md) 585 { 586 return &md->stats; 587 } 588 589 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 590 { 591 struct mapped_device *md = bdev->bd_disk->private_data; 592 593 return dm_get_geometry(md, geo); 594 } 595 596 static int dm_grab_bdev_for_ioctl(struct mapped_device *md, 597 struct block_device **bdev, 598 fmode_t *mode) 599 { 600 struct dm_target *tgt; 601 struct dm_table *map; 602 int srcu_idx, r; 603 604 retry: 605 r = -ENOTTY; 606 map = dm_get_live_table(md, &srcu_idx); 607 if (!map || !dm_table_get_size(map)) 608 goto out; 609 610 /* We only support devices that have a single target */ 611 if (dm_table_get_num_targets(map) != 1) 612 goto out; 613 614 tgt = dm_table_get_target(map, 0); 615 if (!tgt->type->prepare_ioctl) 616 goto out; 617 618 if (dm_suspended_md(md)) { 619 r = -EAGAIN; 620 goto out; 621 } 622 623 r = tgt->type->prepare_ioctl(tgt, bdev, mode); 624 if (r < 0) 625 goto out; 626 627 bdgrab(*bdev); 628 dm_put_live_table(md, srcu_idx); 629 return r; 630 631 out: 632 dm_put_live_table(md, srcu_idx); 633 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 634 msleep(10); 635 goto retry; 636 } 637 return r; 638 } 639 640 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 641 unsigned int cmd, unsigned long arg) 642 { 643 struct mapped_device *md = bdev->bd_disk->private_data; 644 int r; 645 646 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 647 if (r < 0) 648 return r; 649 650 if (r > 0) { 651 /* 652 * Target determined this ioctl is being issued against 653 * a logical partition of the parent bdev; so extra 654 * validation is needed. 
655 */ 656 r = scsi_verify_blk_ioctl(NULL, cmd); 657 if (r) 658 goto out; 659 } 660 661 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 662 out: 663 bdput(bdev); 664 return r; 665 } 666 667 static struct dm_io *alloc_io(struct mapped_device *md) 668 { 669 return mempool_alloc(md->io_pool, GFP_NOIO); 670 } 671 672 static void free_io(struct mapped_device *md, struct dm_io *io) 673 { 674 mempool_free(io, md->io_pool); 675 } 676 677 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) 678 { 679 bio_put(&tio->clone); 680 } 681 682 static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md, 683 gfp_t gfp_mask) 684 { 685 return mempool_alloc(md->io_pool, gfp_mask); 686 } 687 688 static void free_old_rq_tio(struct dm_rq_target_io *tio) 689 { 690 mempool_free(tio, tio->md->io_pool); 691 } 692 693 static struct request *alloc_old_clone_request(struct mapped_device *md, 694 gfp_t gfp_mask) 695 { 696 return mempool_alloc(md->rq_pool, gfp_mask); 697 } 698 699 static void free_old_clone_request(struct mapped_device *md, struct request *rq) 700 { 701 mempool_free(rq, md->rq_pool); 702 } 703 704 static int md_in_flight(struct mapped_device *md) 705 { 706 return atomic_read(&md->pending[READ]) + 707 atomic_read(&md->pending[WRITE]); 708 } 709 710 static void start_io_acct(struct dm_io *io) 711 { 712 struct mapped_device *md = io->md; 713 struct bio *bio = io->bio; 714 int cpu; 715 int rw = bio_data_dir(bio); 716 717 io->start_time = jiffies; 718 719 cpu = part_stat_lock(); 720 part_round_stats(cpu, &dm_disk(md)->part0); 721 part_stat_unlock(); 722 atomic_set(&dm_disk(md)->part0.in_flight[rw], 723 atomic_inc_return(&md->pending[rw])); 724 725 if (unlikely(dm_stats_used(&md->stats))) 726 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 727 bio_sectors(bio), false, 0, &io->stats_aux); 728 } 729 730 static void end_io_acct(struct dm_io *io) 731 { 732 struct mapped_device *md = io->md; 733 struct bio *bio = io->bio; 734 unsigned long duration = jiffies - io->start_time; 735 int pending; 736 int rw = bio_data_dir(bio); 737 738 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); 739 740 if (unlikely(dm_stats_used(&md->stats))) 741 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 742 bio_sectors(bio), true, duration, &io->stats_aux); 743 744 /* 745 * After this is decremented the bio must not be touched if it is 746 * a flush. 747 */ 748 pending = atomic_dec_return(&md->pending[rw]); 749 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); 750 pending += atomic_read(&md->pending[rw^0x1]); 751 752 /* nudge anyone waiting on suspend queue */ 753 if (!pending) 754 wake_up(&md->wait); 755 } 756 757 /* 758 * Add the bio to the list of deferred io. 759 */ 760 static void queue_io(struct mapped_device *md, struct bio *bio) 761 { 762 unsigned long flags; 763 764 spin_lock_irqsave(&md->deferred_lock, flags); 765 bio_list_add(&md->deferred, bio); 766 spin_unlock_irqrestore(&md->deferred_lock, flags); 767 queue_work(md->wq, &md->work); 768 } 769 770 /* 771 * Everyone (including functions in this file), should use this 772 * function to access the md->map field, and make sure they call 773 * dm_put_live_table() when finished. 
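 *
 * A typical access pattern (an illustrative sketch, not a quote from a
 * particular caller) looks like:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... use the table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * dm_put_live_table() must be called even when the returned table is
 * NULL, because the SRCU read lock is taken unconditionally.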
774 */ 775 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 776 { 777 *srcu_idx = srcu_read_lock(&md->io_barrier); 778 779 return srcu_dereference(md->map, &md->io_barrier); 780 } 781 782 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 783 { 784 srcu_read_unlock(&md->io_barrier, srcu_idx); 785 } 786 787 void dm_sync_table(struct mapped_device *md) 788 { 789 synchronize_srcu(&md->io_barrier); 790 synchronize_rcu_expedited(); 791 } 792 793 /* 794 * A fast alternative to dm_get_live_table/dm_put_live_table. 795 * The caller must not block between these two functions. 796 */ 797 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 798 { 799 rcu_read_lock(); 800 return rcu_dereference(md->map); 801 } 802 803 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 804 { 805 rcu_read_unlock(); 806 } 807 808 /* 809 * Open a table device so we can use it as a map destination. 810 */ 811 static int open_table_device(struct table_device *td, dev_t dev, 812 struct mapped_device *md) 813 { 814 static char *_claim_ptr = "I belong to device-mapper"; 815 struct block_device *bdev; 816 817 int r; 818 819 BUG_ON(td->dm_dev.bdev); 820 821 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); 822 if (IS_ERR(bdev)) 823 return PTR_ERR(bdev); 824 825 r = bd_link_disk_holder(bdev, dm_disk(md)); 826 if (r) { 827 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 828 return r; 829 } 830 831 td->dm_dev.bdev = bdev; 832 return 0; 833 } 834 835 /* 836 * Close a table device that we've been using. 837 */ 838 static void close_table_device(struct table_device *td, struct mapped_device *md) 839 { 840 if (!td->dm_dev.bdev) 841 return; 842 843 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 844 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 845 td->dm_dev.bdev = NULL; 846 } 847 848 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 849 fmode_t mode) { 850 struct table_device *td; 851 852 list_for_each_entry(td, l, list) 853 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 854 return td; 855 856 return NULL; 857 } 858 859 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 860 struct dm_dev **result) { 861 int r; 862 struct table_device *td; 863 864 mutex_lock(&md->table_devices_lock); 865 td = find_table_device(&md->table_devices, dev, mode); 866 if (!td) { 867 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 868 if (!td) { 869 mutex_unlock(&md->table_devices_lock); 870 return -ENOMEM; 871 } 872 873 td->dm_dev.mode = mode; 874 td->dm_dev.bdev = NULL; 875 876 if ((r = open_table_device(td, dev, md))) { 877 mutex_unlock(&md->table_devices_lock); 878 kfree(td); 879 return r; 880 } 881 882 format_dev_t(td->dm_dev.name, dev); 883 884 atomic_set(&td->count, 0); 885 list_add(&td->list, &md->table_devices); 886 } 887 atomic_inc(&td->count); 888 mutex_unlock(&md->table_devices_lock); 889 890 *result = &td->dm_dev; 891 return 0; 892 } 893 EXPORT_SYMBOL_GPL(dm_get_table_device); 894 895 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 896 { 897 struct table_device *td = container_of(d, struct table_device, dm_dev); 898 899 mutex_lock(&md->table_devices_lock); 900 if (atomic_dec_and_test(&td->count)) { 901 close_table_device(td, md); 902 list_del(&td->list); 903 kfree(td); 904 } 905 mutex_unlock(&md->table_devices_lock); 906 } 907 
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge function; unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give you
 *   this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio->bi_error = io_error;
			bio_endio(bio);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	int error = bio->bi_error;
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(md, tio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ?
blk_mq_rq_to_pdu(rq) : rq->special); 1117 } 1118 1119 static void rq_end_stats(struct mapped_device *md, struct request *orig) 1120 { 1121 if (unlikely(dm_stats_used(&md->stats))) { 1122 struct dm_rq_target_io *tio = tio_from_request(orig); 1123 tio->duration_jiffies = jiffies - tio->duration_jiffies; 1124 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), 1125 tio->n_sectors, true, tio->duration_jiffies, 1126 &tio->stats_aux); 1127 } 1128 } 1129 1130 /* 1131 * Don't touch any member of the md after calling this function because 1132 * the md may be freed in dm_put() at the end of this function. 1133 * Or do dm_get() before calling this function and dm_put() later. 1134 */ 1135 static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 1136 { 1137 atomic_dec(&md->pending[rw]); 1138 1139 /* nudge anyone waiting on suspend queue */ 1140 if (!md_in_flight(md)) 1141 wake_up(&md->wait); 1142 1143 /* 1144 * Run this off this callpath, as drivers could invoke end_io while 1145 * inside their request_fn (and holding the queue lock). Calling 1146 * back into ->request_fn() could deadlock attempting to grab the 1147 * queue lock again. 1148 */ 1149 if (!md->queue->mq_ops && run_queue) 1150 blk_run_queue_async(md->queue); 1151 1152 /* 1153 * dm_put() must be at the end of this function. See the comment above 1154 */ 1155 dm_put(md); 1156 } 1157 1158 static void free_rq_clone(struct request *clone) 1159 { 1160 struct dm_rq_target_io *tio = clone->end_io_data; 1161 struct mapped_device *md = tio->md; 1162 1163 blk_rq_unprep_clone(clone); 1164 1165 if (md->type == DM_TYPE_MQ_REQUEST_BASED) 1166 /* stacked on blk-mq queue(s) */ 1167 tio->ti->type->release_clone_rq(clone); 1168 else if (!md->queue->mq_ops) 1169 /* request_fn queue stacked on request_fn queue(s) */ 1170 free_old_clone_request(md, clone); 1171 1172 if (!md->queue->mq_ops) 1173 free_old_rq_tio(tio); 1174 } 1175 1176 /* 1177 * Complete the clone and the original request. 1178 * Must be called without clone's queue lock held, 1179 * see end_clone_request() for more details. 1180 */ 1181 static void dm_end_request(struct request *clone, int error) 1182 { 1183 int rw = rq_data_dir(clone); 1184 struct dm_rq_target_io *tio = clone->end_io_data; 1185 struct mapped_device *md = tio->md; 1186 struct request *rq = tio->orig; 1187 1188 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 1189 rq->errors = clone->errors; 1190 rq->resid_len = clone->resid_len; 1191 1192 if (rq->sense) 1193 /* 1194 * We are using the sense buffer of the original 1195 * request. 1196 * So setting the length of the sense data is enough. 1197 */ 1198 rq->sense_len = clone->sense_len; 1199 } 1200 1201 free_rq_clone(clone); 1202 rq_end_stats(md, rq); 1203 if (!rq->q->mq_ops) 1204 blk_end_request_all(rq, error); 1205 else 1206 blk_mq_end_request(rq, error); 1207 rq_completed(md, rw, true); 1208 } 1209 1210 static void dm_unprep_request(struct request *rq) 1211 { 1212 struct dm_rq_target_io *tio = tio_from_request(rq); 1213 struct request *clone = tio->clone; 1214 1215 if (!rq->q->mq_ops) { 1216 rq->special = NULL; 1217 rq->cmd_flags &= ~REQ_DONTPREP; 1218 } 1219 1220 if (clone) 1221 free_rq_clone(clone); 1222 else if (!tio->md->queue->mq_ops) 1223 free_old_rq_tio(tio); 1224 } 1225 1226 /* 1227 * Requeue the original request of a clone. 
1228 */ 1229 static void dm_old_requeue_request(struct request *rq) 1230 { 1231 struct request_queue *q = rq->q; 1232 unsigned long flags; 1233 1234 spin_lock_irqsave(q->queue_lock, flags); 1235 blk_requeue_request(q, rq); 1236 blk_run_queue_async(q); 1237 spin_unlock_irqrestore(q->queue_lock, flags); 1238 } 1239 1240 static void dm_mq_requeue_request(struct request *rq) 1241 { 1242 struct request_queue *q = rq->q; 1243 unsigned long flags; 1244 1245 blk_mq_requeue_request(rq); 1246 spin_lock_irqsave(q->queue_lock, flags); 1247 if (!blk_queue_stopped(q)) 1248 blk_mq_kick_requeue_list(q); 1249 spin_unlock_irqrestore(q->queue_lock, flags); 1250 } 1251 1252 static void dm_requeue_original_request(struct mapped_device *md, 1253 struct request *rq) 1254 { 1255 int rw = rq_data_dir(rq); 1256 1257 rq_end_stats(md, rq); 1258 dm_unprep_request(rq); 1259 1260 if (!rq->q->mq_ops) 1261 dm_old_requeue_request(rq); 1262 else 1263 dm_mq_requeue_request(rq); 1264 1265 rq_completed(md, rw, false); 1266 } 1267 1268 static void dm_old_stop_queue(struct request_queue *q) 1269 { 1270 unsigned long flags; 1271 1272 spin_lock_irqsave(q->queue_lock, flags); 1273 if (blk_queue_stopped(q)) { 1274 spin_unlock_irqrestore(q->queue_lock, flags); 1275 return; 1276 } 1277 1278 blk_stop_queue(q); 1279 spin_unlock_irqrestore(q->queue_lock, flags); 1280 } 1281 1282 static void dm_stop_queue(struct request_queue *q) 1283 { 1284 if (!q->mq_ops) 1285 dm_old_stop_queue(q); 1286 else 1287 blk_mq_stop_hw_queues(q); 1288 } 1289 1290 static void dm_old_start_queue(struct request_queue *q) 1291 { 1292 unsigned long flags; 1293 1294 spin_lock_irqsave(q->queue_lock, flags); 1295 if (blk_queue_stopped(q)) 1296 blk_start_queue(q); 1297 spin_unlock_irqrestore(q->queue_lock, flags); 1298 } 1299 1300 static void dm_start_queue(struct request_queue *q) 1301 { 1302 if (!q->mq_ops) 1303 dm_old_start_queue(q); 1304 else { 1305 blk_mq_start_stopped_hw_queues(q, true); 1306 blk_mq_kick_requeue_list(q); 1307 } 1308 } 1309 1310 static void dm_done(struct request *clone, int error, bool mapped) 1311 { 1312 int r = error; 1313 struct dm_rq_target_io *tio = clone->end_io_data; 1314 dm_request_endio_fn rq_end_io = NULL; 1315 1316 if (tio->ti) { 1317 rq_end_io = tio->ti->type->rq_end_io; 1318 1319 if (mapped && rq_end_io) 1320 r = rq_end_io(tio->ti, clone, error, &tio->info); 1321 } 1322 1323 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && 1324 !clone->q->limits.max_write_same_sectors)) 1325 disable_write_same(tio->md); 1326 1327 if (r <= 0) 1328 /* The target wants to complete the I/O */ 1329 dm_end_request(clone, r); 1330 else if (r == DM_ENDIO_INCOMPLETE) 1331 /* The target will handle the I/O */ 1332 return; 1333 else if (r == DM_ENDIO_REQUEUE) 1334 /* The target wants to requeue the I/O */ 1335 dm_requeue_original_request(tio->md, tio->orig); 1336 else { 1337 DMWARN("unimplemented target endio return value: %d", r); 1338 BUG(); 1339 } 1340 } 1341 1342 /* 1343 * Request completion handler for request-based dm 1344 */ 1345 static void dm_softirq_done(struct request *rq) 1346 { 1347 bool mapped = true; 1348 struct dm_rq_target_io *tio = tio_from_request(rq); 1349 struct request *clone = tio->clone; 1350 int rw; 1351 1352 if (!clone) { 1353 rq_end_stats(tio->md, rq); 1354 rw = rq_data_dir(rq); 1355 if (!rq->q->mq_ops) { 1356 blk_end_request_all(rq, tio->error); 1357 rq_completed(tio->md, rw, false); 1358 free_old_rq_tio(tio); 1359 } else { 1360 blk_mq_end_request(rq, tio->error); 1361 rq_completed(tio->md, rw, false); 1362 } 
		return;
	}

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (!clone->q->mq_ops) {
		/*
		 * This only cleans up the bookkeeping for the queue in which
		 * the clone was dispatched.
		 * The clone is *NOT* actually freed here because it was
		 * allocated from dm's own mempool (REQ_ALLOCED isn't set).
		 */
		__blk_put_request(clone->q, clone);
	}

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_FLUSH.
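 *
 * For example, a target's ->map method that only wants to handle the
 * first max_sectors of a larger bio (max_sectors being whatever limit
 * that hypothetical target computes) might do:
 *
 *	if (bio_sectors(bio) > max_sectors)
 *		dm_accept_partial_bio(bio, max_sectors);
 *
 * before remapping the bio and returning DM_MAPIO_REMAPPED; device-mapper
 * then resubmits the remaining sectors in a follow-up bio, as described
 * below.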
1479 * 1480 * dm_accept_partial_bio informs the dm that the target only wants to process 1481 * additional n_sectors sectors of the bio and the rest of the data should be 1482 * sent in a next bio. 1483 * 1484 * A diagram that explains the arithmetics: 1485 * +--------------------+---------------+-------+ 1486 * | 1 | 2 | 3 | 1487 * +--------------------+---------------+-------+ 1488 * 1489 * <-------------- *tio->len_ptr ---------------> 1490 * <------- bi_size -------> 1491 * <-- n_sectors --> 1492 * 1493 * Region 1 was already iterated over with bio_advance or similar function. 1494 * (it may be empty if the target doesn't use bio_advance) 1495 * Region 2 is the remaining bio size that the target wants to process. 1496 * (it may be empty if region 1 is non-empty, although there is no reason 1497 * to make it empty) 1498 * The target requires that region 3 is to be sent in the next bio. 1499 * 1500 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1501 * the partially processed part (the sum of regions 1+2) must be the same for all 1502 * copies of the bio. 1503 */ 1504 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1505 { 1506 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1507 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1508 BUG_ON(bio->bi_rw & REQ_FLUSH); 1509 BUG_ON(bi_size > *tio->len_ptr); 1510 BUG_ON(n_sectors > bi_size); 1511 *tio->len_ptr -= bi_size - n_sectors; 1512 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1513 } 1514 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1515 1516 static void __map_bio(struct dm_target_io *tio) 1517 { 1518 int r; 1519 sector_t sector; 1520 struct mapped_device *md; 1521 struct bio *clone = &tio->clone; 1522 struct dm_target *ti = tio->ti; 1523 1524 clone->bi_end_io = clone_endio; 1525 1526 /* 1527 * Map the clone. If r == 0 we don't need to do 1528 * anything, the target has assumed ownership of 1529 * this io. 1530 */ 1531 atomic_inc(&tio->io->io_count); 1532 sector = clone->bi_iter.bi_sector; 1533 r = ti->type->map(ti, clone); 1534 if (r == DM_MAPIO_REMAPPED) { 1535 /* the bio has been remapped so dispatch it */ 1536 1537 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 1538 tio->io->bio->bi_bdev->bd_dev, sector); 1539 1540 generic_make_request(clone); 1541 } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 1542 /* error the io and bail out, or requeue it if needed */ 1543 md = tio->io->md; 1544 dec_pending(tio->io, r); 1545 free_tio(md, tio); 1546 } else if (r != DM_MAPIO_SUBMITTED) { 1547 DMWARN("unimplemented target map return value: %d", r); 1548 BUG(); 1549 } 1550 } 1551 1552 struct clone_info { 1553 struct mapped_device *md; 1554 struct dm_table *map; 1555 struct bio *bio; 1556 struct dm_io *io; 1557 sector_t sector; 1558 unsigned sector_count; 1559 }; 1560 1561 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1562 { 1563 bio->bi_iter.bi_sector = sector; 1564 bio->bi_iter.bi_size = to_bytes(len); 1565 } 1566 1567 /* 1568 * Creates a bio that consists of range of complete bvecs. 
1569 */ 1570 static int clone_bio(struct dm_target_io *tio, struct bio *bio, 1571 sector_t sector, unsigned len) 1572 { 1573 struct bio *clone = &tio->clone; 1574 1575 __bio_clone_fast(clone, bio); 1576 1577 if (bio_integrity(bio)) { 1578 int r = bio_integrity_clone(clone, bio, GFP_NOIO); 1579 if (r < 0) 1580 return r; 1581 } 1582 1583 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1584 clone->bi_iter.bi_size = to_bytes(len); 1585 1586 if (bio_integrity(bio)) 1587 bio_integrity_trim(clone, 0, len); 1588 1589 return 0; 1590 } 1591 1592 static struct dm_target_io *alloc_tio(struct clone_info *ci, 1593 struct dm_target *ti, 1594 unsigned target_bio_nr) 1595 { 1596 struct dm_target_io *tio; 1597 struct bio *clone; 1598 1599 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1600 tio = container_of(clone, struct dm_target_io, clone); 1601 1602 tio->io = ci->io; 1603 tio->ti = ti; 1604 tio->target_bio_nr = target_bio_nr; 1605 1606 return tio; 1607 } 1608 1609 static void __clone_and_map_simple_bio(struct clone_info *ci, 1610 struct dm_target *ti, 1611 unsigned target_bio_nr, unsigned *len) 1612 { 1613 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1614 struct bio *clone = &tio->clone; 1615 1616 tio->len_ptr = len; 1617 1618 __bio_clone_fast(clone, ci->bio); 1619 if (len) 1620 bio_setup_sector(clone, ci->sector, *len); 1621 1622 __map_bio(tio); 1623 } 1624 1625 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1626 unsigned num_bios, unsigned *len) 1627 { 1628 unsigned target_bio_nr; 1629 1630 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 1631 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 1632 } 1633 1634 static int __send_empty_flush(struct clone_info *ci) 1635 { 1636 unsigned target_nr = 0; 1637 struct dm_target *ti; 1638 1639 BUG_ON(bio_has_data(ci->bio)); 1640 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1641 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1642 1643 return 0; 1644 } 1645 1646 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1647 sector_t sector, unsigned *len) 1648 { 1649 struct bio *bio = ci->bio; 1650 struct dm_target_io *tio; 1651 unsigned target_bio_nr; 1652 unsigned num_target_bios = 1; 1653 int r = 0; 1654 1655 /* 1656 * Does the target want to receive duplicate copies of the bio? 
1657 */ 1658 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1659 num_target_bios = ti->num_write_bios(ti, bio); 1660 1661 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1662 tio = alloc_tio(ci, ti, target_bio_nr); 1663 tio->len_ptr = len; 1664 r = clone_bio(tio, bio, sector, *len); 1665 if (r < 0) { 1666 free_tio(ci->md, tio); 1667 break; 1668 } 1669 __map_bio(tio); 1670 } 1671 1672 return r; 1673 } 1674 1675 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1676 1677 static unsigned get_num_discard_bios(struct dm_target *ti) 1678 { 1679 return ti->num_discard_bios; 1680 } 1681 1682 static unsigned get_num_write_same_bios(struct dm_target *ti) 1683 { 1684 return ti->num_write_same_bios; 1685 } 1686 1687 typedef bool (*is_split_required_fn)(struct dm_target *ti); 1688 1689 static bool is_split_required_for_discard(struct dm_target *ti) 1690 { 1691 return ti->split_discard_bios; 1692 } 1693 1694 static int __send_changing_extent_only(struct clone_info *ci, 1695 get_num_bios_fn get_num_bios, 1696 is_split_required_fn is_split_required) 1697 { 1698 struct dm_target *ti; 1699 unsigned len; 1700 unsigned num_bios; 1701 1702 do { 1703 ti = dm_table_find_target(ci->map, ci->sector); 1704 if (!dm_target_is_valid(ti)) 1705 return -EIO; 1706 1707 /* 1708 * Even though the device advertised support for this type of 1709 * request, that does not mean every target supports it, and 1710 * reconfiguration might also have changed that since the 1711 * check was performed. 1712 */ 1713 num_bios = get_num_bios ? get_num_bios(ti) : 0; 1714 if (!num_bios) 1715 return -EOPNOTSUPP; 1716 1717 if (is_split_required && !is_split_required(ti)) 1718 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1719 else 1720 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 1721 1722 __send_duplicate_bios(ci, ti, num_bios, &len); 1723 1724 ci->sector += len; 1725 } while (ci->sector_count -= len); 1726 1727 return 0; 1728 } 1729 1730 static int __send_discard(struct clone_info *ci) 1731 { 1732 return __send_changing_extent_only(ci, get_num_discard_bios, 1733 is_split_required_for_discard); 1734 } 1735 1736 static int __send_write_same(struct clone_info *ci) 1737 { 1738 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 1739 } 1740 1741 /* 1742 * Select the correct strategy for processing a non-flush bio. 1743 */ 1744 static int __split_and_process_non_flush(struct clone_info *ci) 1745 { 1746 struct bio *bio = ci->bio; 1747 struct dm_target *ti; 1748 unsigned len; 1749 int r; 1750 1751 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1752 return __send_discard(ci); 1753 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1754 return __send_write_same(ci); 1755 1756 ti = dm_table_find_target(ci->map, ci->sector); 1757 if (!dm_target_is_valid(ti)) 1758 return -EIO; 1759 1760 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1761 1762 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1763 if (r < 0) 1764 return r; 1765 1766 ci->sector += len; 1767 ci->sector_count -= len; 1768 1769 return 0; 1770 } 1771 1772 /* 1773 * Entry point to split a bio into clones and submit them to the targets. 
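 *
 * Flush bios (REQ_FLUSH) are turned into empty flush clones, one per
 * num_flush_bios for every target in the table; all other bios are split
 * against target boundaries (and max_io_len) and each piece is cloned and
 * mapped in turn.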
1774 */ 1775 static void __split_and_process_bio(struct mapped_device *md, 1776 struct dm_table *map, struct bio *bio) 1777 { 1778 struct clone_info ci; 1779 int error = 0; 1780 1781 if (unlikely(!map)) { 1782 bio_io_error(bio); 1783 return; 1784 } 1785 1786 ci.map = map; 1787 ci.md = md; 1788 ci.io = alloc_io(md); 1789 ci.io->error = 0; 1790 atomic_set(&ci.io->io_count, 1); 1791 ci.io->bio = bio; 1792 ci.io->md = md; 1793 spin_lock_init(&ci.io->endio_lock); 1794 ci.sector = bio->bi_iter.bi_sector; 1795 1796 start_io_acct(ci.io); 1797 1798 if (bio->bi_rw & REQ_FLUSH) { 1799 ci.bio = &ci.md->flush_bio; 1800 ci.sector_count = 0; 1801 error = __send_empty_flush(&ci); 1802 /* dec_pending submits any data associated with flush */ 1803 } else { 1804 ci.bio = bio; 1805 ci.sector_count = bio_sectors(bio); 1806 while (ci.sector_count && !error) 1807 error = __split_and_process_non_flush(&ci); 1808 } 1809 1810 /* drop the extra reference count */ 1811 dec_pending(ci.io, error); 1812 } 1813 /*----------------------------------------------------------------- 1814 * CRUD END 1815 *---------------------------------------------------------------*/ 1816 1817 /* 1818 * The request function that just remaps the bio built up by 1819 * dm_merge_bvec. 1820 */ 1821 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1822 { 1823 int rw = bio_data_dir(bio); 1824 struct mapped_device *md = q->queuedata; 1825 int srcu_idx; 1826 struct dm_table *map; 1827 1828 map = dm_get_live_table(md, &srcu_idx); 1829 1830 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 1831 1832 /* if we're suspended, we have to queue this io for later */ 1833 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1834 dm_put_live_table(md, srcu_idx); 1835 1836 if (bio_rw(bio) != READA) 1837 queue_io(md, bio); 1838 else 1839 bio_io_error(bio); 1840 return BLK_QC_T_NONE; 1841 } 1842 1843 __split_and_process_bio(md, map, bio); 1844 dm_put_live_table(md, srcu_idx); 1845 return BLK_QC_T_NONE; 1846 } 1847 1848 int dm_request_based(struct mapped_device *md) 1849 { 1850 return blk_queue_stackable(md->queue); 1851 } 1852 1853 static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1854 { 1855 int r; 1856 1857 if (blk_queue_io_stat(clone->q)) 1858 clone->cmd_flags |= REQ_IO_STAT; 1859 1860 clone->start_time = jiffies; 1861 r = blk_insert_cloned_request(clone->q, clone); 1862 if (r) 1863 /* must complete clone in terms of original request */ 1864 dm_complete_request(rq, r); 1865 } 1866 1867 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1868 void *data) 1869 { 1870 struct dm_rq_target_io *tio = data; 1871 struct dm_rq_clone_bio_info *info = 1872 container_of(bio, struct dm_rq_clone_bio_info, clone); 1873 1874 info->orig = bio_orig; 1875 info->tio = tio; 1876 bio->bi_end_io = end_clone_bio; 1877 1878 return 0; 1879 } 1880 1881 static int setup_clone(struct request *clone, struct request *rq, 1882 struct dm_rq_target_io *tio, gfp_t gfp_mask) 1883 { 1884 int r; 1885 1886 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 1887 dm_rq_bio_constructor, tio); 1888 if (r) 1889 return r; 1890 1891 clone->cmd = rq->cmd; 1892 clone->cmd_len = rq->cmd_len; 1893 clone->sense = rq->sense; 1894 clone->end_io = end_clone_request; 1895 clone->end_io_data = tio; 1896 1897 tio->clone = clone; 1898 1899 return 0; 1900 } 1901 1902 static struct request *clone_old_rq(struct request *rq, struct mapped_device *md, 1903 struct dm_rq_target_io *tio, gfp_t gfp_mask) 1904 { 1905 /* 
1906 * Create clone for use with .request_fn request_queue 1907 */ 1908 struct request *clone; 1909 1910 clone = alloc_old_clone_request(md, gfp_mask); 1911 if (!clone) 1912 return NULL; 1913 1914 blk_rq_init(NULL, clone); 1915 if (setup_clone(clone, rq, tio, gfp_mask)) { 1916 /* -ENOMEM */ 1917 free_old_clone_request(md, clone); 1918 return NULL; 1919 } 1920 1921 return clone; 1922 } 1923 1924 static void map_tio_request(struct kthread_work *work); 1925 1926 static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 1927 struct mapped_device *md) 1928 { 1929 tio->md = md; 1930 tio->ti = NULL; 1931 tio->clone = NULL; 1932 tio->orig = rq; 1933 tio->error = 0; 1934 /* 1935 * Avoid initializing info for blk-mq; it passes 1936 * target-specific data through info.ptr 1937 * (see: dm_mq_init_request) 1938 */ 1939 if (!md->init_tio_pdu) 1940 memset(&tio->info, 0, sizeof(tio->info)); 1941 if (md->kworker_task) 1942 init_kthread_work(&tio->work, map_tio_request); 1943 } 1944 1945 static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq, 1946 struct mapped_device *md, 1947 gfp_t gfp_mask) 1948 { 1949 struct dm_rq_target_io *tio; 1950 int srcu_idx; 1951 struct dm_table *table; 1952 1953 tio = alloc_old_rq_tio(md, gfp_mask); 1954 if (!tio) 1955 return NULL; 1956 1957 init_tio(tio, rq, md); 1958 1959 table = dm_get_live_table(md, &srcu_idx); 1960 /* 1961 * Must clone a request if this .request_fn DM device 1962 * is stacked on .request_fn device(s). 1963 */ 1964 if (!dm_table_mq_request_based(table)) { 1965 if (!clone_old_rq(rq, md, tio, gfp_mask)) { 1966 dm_put_live_table(md, srcu_idx); 1967 free_old_rq_tio(tio); 1968 return NULL; 1969 } 1970 } 1971 dm_put_live_table(md, srcu_idx); 1972 1973 return tio; 1974 } 1975 1976 /* 1977 * Called with the queue lock held. 
1978 */ 1979 static int dm_old_prep_fn(struct request_queue *q, struct request *rq) 1980 { 1981 struct mapped_device *md = q->queuedata; 1982 struct dm_rq_target_io *tio; 1983 1984 if (unlikely(rq->special)) { 1985 DMWARN("Already has something in rq->special."); 1986 return BLKPREP_KILL; 1987 } 1988 1989 tio = dm_old_prep_tio(rq, md, GFP_ATOMIC); 1990 if (!tio) 1991 return BLKPREP_DEFER; 1992 1993 rq->special = tio; 1994 rq->cmd_flags |= REQ_DONTPREP; 1995 1996 return BLKPREP_OK; 1997 } 1998 1999 /* 2000 * Returns: 2001 * 0 : the request has been processed 2002 * DM_MAPIO_REQUEUE : the original request needs to be requeued 2003 * < 0 : the request was completed due to failure 2004 */ 2005 static int map_request(struct dm_rq_target_io *tio, struct request *rq, 2006 struct mapped_device *md) 2007 { 2008 int r; 2009 struct dm_target *ti = tio->ti; 2010 struct request *clone = NULL; 2011 2012 if (tio->clone) { 2013 clone = tio->clone; 2014 r = ti->type->map_rq(ti, clone, &tio->info); 2015 } else { 2016 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); 2017 if (r < 0) { 2018 /* The target wants to complete the I/O */ 2019 dm_kill_unmapped_request(rq, r); 2020 return r; 2021 } 2022 if (r != DM_MAPIO_REMAPPED) 2023 return r; 2024 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { 2025 /* -ENOMEM */ 2026 ti->type->release_clone_rq(clone); 2027 return DM_MAPIO_REQUEUE; 2028 } 2029 } 2030 2031 switch (r) { 2032 case DM_MAPIO_SUBMITTED: 2033 /* The target has taken the I/O to submit by itself later */ 2034 break; 2035 case DM_MAPIO_REMAPPED: 2036 /* The target has remapped the I/O so dispatch it */ 2037 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 2038 blk_rq_pos(rq)); 2039 dm_dispatch_clone_request(clone, rq); 2040 break; 2041 case DM_MAPIO_REQUEUE: 2042 /* The target wants to requeue the I/O */ 2043 dm_requeue_original_request(md, tio->orig); 2044 break; 2045 default: 2046 if (r > 0) { 2047 DMWARN("unimplemented target map return value: %d", r); 2048 BUG(); 2049 } 2050 2051 /* The target wants to complete the I/O */ 2052 dm_kill_unmapped_request(rq, r); 2053 return r; 2054 } 2055 2056 return 0; 2057 } 2058 2059 static void map_tio_request(struct kthread_work *work) 2060 { 2061 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 2062 struct request *rq = tio->orig; 2063 struct mapped_device *md = tio->md; 2064 2065 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 2066 dm_requeue_original_request(md, rq); 2067 } 2068 2069 static void dm_start_request(struct mapped_device *md, struct request *orig) 2070 { 2071 if (!orig->q->mq_ops) 2072 blk_start_request(orig); 2073 else 2074 blk_mq_start_request(orig); 2075 atomic_inc(&md->pending[rq_data_dir(orig)]); 2076 2077 if (md->seq_rq_merge_deadline_usecs) { 2078 md->last_rq_pos = rq_end_sector(orig); 2079 md->last_rq_rw = rq_data_dir(orig); 2080 md->last_rq_start_time = ktime_get(); 2081 } 2082 2083 if (unlikely(dm_stats_used(&md->stats))) { 2084 struct dm_rq_target_io *tio = tio_from_request(orig); 2085 tio->duration_jiffies = jiffies; 2086 tio->n_sectors = blk_rq_sectors(orig); 2087 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), 2088 tio->n_sectors, false, 0, &tio->stats_aux); 2089 } 2090 2091 /* 2092 * Hold the md reference here for the in-flight I/O. 2093 * We can't rely on the reference count by device opener, 2094 * because the device may be closed during the request completion 2095 * when all bios are completed. 2096 * See the comment in rq_completed() too. 
2097 */ 2098 dm_get(md); 2099 } 2100 2101 #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000 2102 2103 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) 2104 { 2105 return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs); 2106 } 2107 2108 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, 2109 const char *buf, size_t count) 2110 { 2111 unsigned deadline; 2112 2113 if (!dm_request_based(md) || md->use_blk_mq) 2114 return count; 2115 2116 if (kstrtouint(buf, 10, &deadline)) 2117 return -EINVAL; 2118 2119 if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS) 2120 deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS; 2121 2122 md->seq_rq_merge_deadline_usecs = deadline; 2123 2124 return count; 2125 } 2126 2127 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) 2128 { 2129 ktime_t kt_deadline; 2130 2131 if (!md->seq_rq_merge_deadline_usecs) 2132 return false; 2133 2134 kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC); 2135 kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline); 2136 2137 return !ktime_after(ktime_get(), kt_deadline); 2138 } 2139 2140 /* 2141 * q->request_fn for request-based dm. 2142 * Called with the queue lock held. 2143 */ 2144 static void dm_request_fn(struct request_queue *q) 2145 { 2146 struct mapped_device *md = q->queuedata; 2147 struct dm_target *ti = md->immutable_target; 2148 struct request *rq; 2149 struct dm_rq_target_io *tio; 2150 sector_t pos = 0; 2151 2152 if (unlikely(!ti)) { 2153 int srcu_idx; 2154 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2155 2156 ti = dm_table_find_target(map, pos); 2157 dm_put_live_table(md, srcu_idx); 2158 } 2159 2160 /* 2161 * For suspend, check blk_queue_stopped() and increment 2162 * ->pending within a single queue_lock not to increment the 2163 * number of in-flight I/Os after the queue is stopped in 2164 * dm_suspend(). 2165 */ 2166 while (!blk_queue_stopped(q)) { 2167 rq = blk_peek_request(q); 2168 if (!rq) 2169 return; 2170 2171 /* always use block 0 to find the target for flushes for now */ 2172 pos = 0; 2173 if (!(rq->cmd_flags & REQ_FLUSH)) 2174 pos = blk_rq_pos(rq); 2175 2176 if ((dm_request_peeked_before_merge_deadline(md) && 2177 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && 2178 md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) || 2179 (ti->type->busy && ti->type->busy(ti))) { 2180 blk_delay_queue(q, HZ / 100); 2181 return; 2182 } 2183 2184 dm_start_request(md, rq); 2185 2186 tio = tio_from_request(rq); 2187 /* Establish tio->ti before queuing work (map_tio_request) */ 2188 tio->ti = ti; 2189 queue_kthread_work(&md->kworker, &tio->work); 2190 BUG_ON(!irqs_disabled()); 2191 } 2192 } 2193 2194 static int dm_any_congested(void *congested_data, int bdi_bits) 2195 { 2196 int r = bdi_bits; 2197 struct mapped_device *md = congested_data; 2198 struct dm_table *map; 2199 2200 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2201 if (dm_request_based(md)) { 2202 /* 2203 * With request-based DM we only need to check the 2204 * top-level queue for congestion. 2205 */ 2206 r = md->queue->backing_dev_info.wb.state & bdi_bits; 2207 } else { 2208 map = dm_get_live_table_fast(md); 2209 if (map) 2210 r = dm_table_any_congested(map, bdi_bits); 2211 dm_put_live_table_fast(md); 2212 } 2213 } 2214 2215 return r; 2216 } 2217 2218 /*----------------------------------------------------------------- 2219 * An IDR is used to keep track of allocated minor numbers. 
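 * Minor numbers are allocated with the idr_preload() + idr_alloc(GFP_NOWAIT)
 * pattern below, so that any memory needed is reserved before _minor_lock
 * is taken.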
2220 *---------------------------------------------------------------*/ 2221 static void free_minor(int minor) 2222 { 2223 spin_lock(&_minor_lock); 2224 idr_remove(&_minor_idr, minor); 2225 spin_unlock(&_minor_lock); 2226 } 2227 2228 /* 2229 * See if the device with a specific minor # is free. 2230 */ 2231 static int specific_minor(int minor) 2232 { 2233 int r; 2234 2235 if (minor >= (1 << MINORBITS)) 2236 return -EINVAL; 2237 2238 idr_preload(GFP_KERNEL); 2239 spin_lock(&_minor_lock); 2240 2241 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 2242 2243 spin_unlock(&_minor_lock); 2244 idr_preload_end(); 2245 if (r < 0) 2246 return r == -ENOSPC ? -EBUSY : r; 2247 return 0; 2248 } 2249 2250 static int next_free_minor(int *minor) 2251 { 2252 int r; 2253 2254 idr_preload(GFP_KERNEL); 2255 spin_lock(&_minor_lock); 2256 2257 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 2258 2259 spin_unlock(&_minor_lock); 2260 idr_preload_end(); 2261 if (r < 0) 2262 return r; 2263 *minor = r; 2264 return 0; 2265 } 2266 2267 static const struct block_device_operations dm_blk_dops; 2268 2269 static void dm_wq_work(struct work_struct *work); 2270 2271 static void dm_init_md_queue(struct mapped_device *md) 2272 { 2273 /* 2274 * Request-based dm devices cannot be stacked on top of bio-based dm 2275 * devices. The type of this dm device may not have been decided yet. 2276 * The type is decided at the first table loading time. 2277 * To prevent problematic device stacking, clear the queue flag 2278 * for request stacking support until then. 2279 * 2280 * This queue is new, so no concurrency on the queue_flags. 2281 */ 2282 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 2283 2284 /* 2285 * Initialize data that will only be used by a non-blk-mq DM queue 2286 * - must do so here (in alloc_dev callchain) before queue is used 2287 */ 2288 md->queue->queuedata = md; 2289 md->queue->backing_dev_info.congested_data = md; 2290 } 2291 2292 static void dm_init_normal_md_queue(struct mapped_device *md) 2293 { 2294 md->use_blk_mq = false; 2295 dm_init_md_queue(md); 2296 2297 /* 2298 * Initialize aspects of queue that aren't relevant for blk-mq 2299 */ 2300 md->queue->backing_dev_info.congested_fn = dm_any_congested; 2301 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 2302 } 2303 2304 static void cleanup_mapped_device(struct mapped_device *md) 2305 { 2306 if (md->wq) 2307 destroy_workqueue(md->wq); 2308 if (md->kworker_task) 2309 kthread_stop(md->kworker_task); 2310 mempool_destroy(md->io_pool); 2311 mempool_destroy(md->rq_pool); 2312 if (md->bs) 2313 bioset_free(md->bs); 2314 2315 cleanup_srcu_struct(&md->io_barrier); 2316 2317 if (md->disk) { 2318 spin_lock(&_minor_lock); 2319 md->disk->private_data = NULL; 2320 spin_unlock(&_minor_lock); 2321 del_gendisk(md->disk); 2322 put_disk(md->disk); 2323 } 2324 2325 if (md->queue) 2326 blk_cleanup_queue(md->queue); 2327 2328 if (md->bdev) { 2329 bdput(md->bdev); 2330 md->bdev = NULL; 2331 } 2332 } 2333 2334 /* 2335 * Allocate and initialise a blank device with a given minor. 
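 *
 * Error handling in alloc_dev() unwinds with gotos in reverse order of
 * setup; in outline:
 *
 *	queue/disk/wq/bdev setup fails -> bad:            cleanup_mapped_device()
 *	init_srcu_struct() fails       -> bad_io_barrier: free_minor()
 *	minor allocation fails         -> bad_minor:      module_put()
 *	try_module_get() fails         -> bad_module_get: kfree(md)
 *
 * with each label falling through to those below it.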
2336 */ 2337 static struct mapped_device *alloc_dev(int minor) 2338 { 2339 int r, numa_node_id = dm_get_numa_node(); 2340 struct mapped_device *md; 2341 void *old_md; 2342 2343 md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 2344 if (!md) { 2345 DMWARN("unable to allocate device, out of memory."); 2346 return NULL; 2347 } 2348 2349 if (!try_module_get(THIS_MODULE)) 2350 goto bad_module_get; 2351 2352 /* get a minor number for the dev */ 2353 if (minor == DM_ANY_MINOR) 2354 r = next_free_minor(&minor); 2355 else 2356 r = specific_minor(minor); 2357 if (r < 0) 2358 goto bad_minor; 2359 2360 r = init_srcu_struct(&md->io_barrier); 2361 if (r < 0) 2362 goto bad_io_barrier; 2363 2364 md->numa_node_id = numa_node_id; 2365 md->use_blk_mq = use_blk_mq; 2366 md->init_tio_pdu = false; 2367 md->type = DM_TYPE_NONE; 2368 mutex_init(&md->suspend_lock); 2369 mutex_init(&md->type_lock); 2370 mutex_init(&md->table_devices_lock); 2371 spin_lock_init(&md->deferred_lock); 2372 atomic_set(&md->holders, 1); 2373 atomic_set(&md->open_count, 0); 2374 atomic_set(&md->event_nr, 0); 2375 atomic_set(&md->uevent_seq, 0); 2376 INIT_LIST_HEAD(&md->uevent_list); 2377 INIT_LIST_HEAD(&md->table_devices); 2378 spin_lock_init(&md->uevent_lock); 2379 2380 md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 2381 if (!md->queue) 2382 goto bad; 2383 2384 dm_init_md_queue(md); 2385 2386 md->disk = alloc_disk_node(1, numa_node_id); 2387 if (!md->disk) 2388 goto bad; 2389 2390 atomic_set(&md->pending[0], 0); 2391 atomic_set(&md->pending[1], 0); 2392 init_waitqueue_head(&md->wait); 2393 INIT_WORK(&md->work, dm_wq_work); 2394 init_waitqueue_head(&md->eventq); 2395 init_completion(&md->kobj_holder.completion); 2396 md->kworker_task = NULL; 2397 2398 md->disk->major = _major; 2399 md->disk->first_minor = minor; 2400 md->disk->fops = &dm_blk_dops; 2401 md->disk->queue = md->queue; 2402 md->disk->private_data = md; 2403 sprintf(md->disk->disk_name, "dm-%d", minor); 2404 add_disk(md->disk); 2405 format_dev_t(md->name, MKDEV(_major, minor)); 2406 2407 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2408 if (!md->wq) 2409 goto bad; 2410 2411 md->bdev = bdget_disk(md->disk, 0); 2412 if (!md->bdev) 2413 goto bad; 2414 2415 bio_init(&md->flush_bio); 2416 md->flush_bio.bi_bdev = md->bdev; 2417 md->flush_bio.bi_rw = WRITE_FLUSH; 2418 2419 dm_stats_init(&md->stats); 2420 2421 /* Populate the mapping, nobody knows we exist yet */ 2422 spin_lock(&_minor_lock); 2423 old_md = idr_replace(&_minor_idr, md, minor); 2424 spin_unlock(&_minor_lock); 2425 2426 BUG_ON(old_md != MINOR_ALLOCED); 2427 2428 return md; 2429 2430 bad: 2431 cleanup_mapped_device(md); 2432 bad_io_barrier: 2433 free_minor(minor); 2434 bad_minor: 2435 module_put(THIS_MODULE); 2436 bad_module_get: 2437 kfree(md); 2438 return NULL; 2439 } 2440 2441 static void unlock_fs(struct mapped_device *md); 2442 2443 static void free_dev(struct mapped_device *md) 2444 { 2445 int minor = MINOR(disk_devt(md->disk)); 2446 2447 unlock_fs(md); 2448 2449 cleanup_mapped_device(md); 2450 if (md->tag_set) { 2451 blk_mq_free_tag_set(md->tag_set); 2452 kfree(md->tag_set); 2453 } 2454 2455 free_table_devices(&md->table_devices); 2456 dm_stats_cleanup(&md->stats); 2457 free_minor(minor); 2458 2459 module_put(THIS_MODULE); 2460 kfree(md); 2461 } 2462 2463 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2464 { 2465 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2466 2467 if (md->bs) { 2468 /* The md already has necessary mempools. 
*/ 2469 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2470 /* 2471 * Reload bioset because front_pad may have changed 2472 * because a different table was loaded. 2473 */ 2474 bioset_free(md->bs); 2475 md->bs = p->bs; 2476 p->bs = NULL; 2477 } 2478 /* 2479 * There's no need to reload with request-based dm 2480 * because the size of front_pad doesn't change. 2481 * Note for future: If you are to reload bioset, 2482 * prep-ed requests in the queue may refer 2483 * to bio from the old bioset, so you must walk 2484 * through the queue to unprep. 2485 */ 2486 goto out; 2487 } 2488 2489 BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 2490 2491 md->io_pool = p->io_pool; 2492 p->io_pool = NULL; 2493 md->rq_pool = p->rq_pool; 2494 p->rq_pool = NULL; 2495 md->bs = p->bs; 2496 p->bs = NULL; 2497 2498 out: 2499 /* mempool bind completed, no longer need any mempools in the table */ 2500 dm_table_free_md_mempools(t); 2501 } 2502 2503 /* 2504 * Bind a table to the device. 2505 */ 2506 static void event_callback(void *context) 2507 { 2508 unsigned long flags; 2509 LIST_HEAD(uevents); 2510 struct mapped_device *md = (struct mapped_device *) context; 2511 2512 spin_lock_irqsave(&md->uevent_lock, flags); 2513 list_splice_init(&md->uevent_list, &uevents); 2514 spin_unlock_irqrestore(&md->uevent_lock, flags); 2515 2516 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2517 2518 atomic_inc(&md->event_nr); 2519 wake_up(&md->eventq); 2520 } 2521 2522 /* 2523 * Protected by md->suspend_lock obtained by dm_swap_table(). 2524 */ 2525 static void __set_size(struct mapped_device *md, sector_t size) 2526 { 2527 set_capacity(md->disk, size); 2528 2529 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2530 } 2531 2532 /* 2533 * Returns old map, which caller must destroy. 2534 */ 2535 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2536 struct queue_limits *limits) 2537 { 2538 struct dm_table *old_map; 2539 struct request_queue *q = md->queue; 2540 sector_t size; 2541 2542 size = dm_table_get_size(t); 2543 2544 /* 2545 * Wipe any geometry if the size of the table changed. 2546 */ 2547 if (size != dm_get_size(md)) 2548 memset(&md->geometry, 0, sizeof(md->geometry)); 2549 2550 __set_size(md, size); 2551 2552 dm_table_event_callback(t, event_callback, md); 2553 2554 /* 2555 * The queue hasn't been stopped yet, if the old table type wasn't 2556 * for request-based during suspension. So stop it to prevent 2557 * I/O mapping before resume. 2558 * This must be done before setting the queue restrictions, 2559 * because request-based dm may be run just after the setting. 2560 */ 2561 if (dm_table_request_based(t)) { 2562 dm_stop_queue(q); 2563 /* 2564 * Leverage the fact that request-based DM targets are 2565 * immutable singletons and establish md->immutable_target 2566 * - used to optimize both dm_request_fn and dm_mq_queue_rq 2567 */ 2568 md->immutable_target = dm_table_get_immutable_target(t); 2569 } 2570 2571 __bind_mempools(md, t); 2572 2573 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2574 rcu_assign_pointer(md->map, (void *)t); 2575 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2576 2577 dm_table_set_restrictions(t, q, limits); 2578 if (old_map) 2579 dm_sync_table(md); 2580 2581 return old_map; 2582 } 2583 2584 /* 2585 * Returns unbound table for the caller to free. 
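 *
 * Together with __bind() above, the live-table pointer follows the usual
 * RCU publish/retire pattern (sketched; md->suspend_lock held throughout):
 *
 *	old = rcu_dereference_protected(md->map, ...);
 *	rcu_assign_pointer(md->map, new);  - or RCU_INIT_POINTER(.., NULL) here
 *	dm_sync_table(md);                 - wait out dm_get_live_table*() readers
 *	dm_table_destroy(old);             - done by the caller, now safe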
2586 */ 2587 static struct dm_table *__unbind(struct mapped_device *md) 2588 { 2589 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2590 2591 if (!map) 2592 return NULL; 2593 2594 dm_table_event_callback(map, NULL, NULL); 2595 RCU_INIT_POINTER(md->map, NULL); 2596 dm_sync_table(md); 2597 2598 return map; 2599 } 2600 2601 /* 2602 * Constructor for a new device. 2603 */ 2604 int dm_create(int minor, struct mapped_device **result) 2605 { 2606 struct mapped_device *md; 2607 2608 md = alloc_dev(minor); 2609 if (!md) 2610 return -ENXIO; 2611 2612 dm_sysfs_init(md); 2613 2614 *result = md; 2615 return 0; 2616 } 2617 2618 /* 2619 * Functions to manage md->type. 2620 * All are required to hold md->type_lock. 2621 */ 2622 void dm_lock_md_type(struct mapped_device *md) 2623 { 2624 mutex_lock(&md->type_lock); 2625 } 2626 2627 void dm_unlock_md_type(struct mapped_device *md) 2628 { 2629 mutex_unlock(&md->type_lock); 2630 } 2631 2632 void dm_set_md_type(struct mapped_device *md, unsigned type) 2633 { 2634 BUG_ON(!mutex_is_locked(&md->type_lock)); 2635 md->type = type; 2636 } 2637 2638 unsigned dm_get_md_type(struct mapped_device *md) 2639 { 2640 return md->type; 2641 } 2642 2643 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2644 { 2645 return md->immutable_target_type; 2646 } 2647 2648 /* 2649 * The queue_limits are only valid as long as you have a reference 2650 * count on 'md'. 2651 */ 2652 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2653 { 2654 BUG_ON(!atomic_read(&md->holders)); 2655 return &md->queue->limits; 2656 } 2657 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2658 2659 static void dm_old_init_rq_based_worker_thread(struct mapped_device *md) 2660 { 2661 /* Initialize the request-based DM worker thread */ 2662 init_kthread_worker(&md->kworker); 2663 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2664 "kdmwork-%s", dm_device_name(md)); 2665 } 2666 2667 /* 2668 * Fully initialize a .request_fn request-based queue. 2669 */ 2670 static int dm_old_init_request_queue(struct mapped_device *md) 2671 { 2672 /* Fully initialize the queue */ 2673 if (!blk_init_allocated_queue(md->queue, dm_request_fn, NULL)) 2674 return -EINVAL; 2675 2676 /* disable dm_request_fn's merge heuristic by default */ 2677 md->seq_rq_merge_deadline_usecs = 0; 2678 2679 dm_init_normal_md_queue(md); 2680 blk_queue_softirq_done(md->queue, dm_softirq_done); 2681 blk_queue_prep_rq(md->queue, dm_old_prep_fn); 2682 2683 dm_old_init_rq_based_worker_thread(md); 2684 2685 elv_register_queue(md->queue); 2686 2687 return 0; 2688 } 2689 2690 static int dm_mq_init_request(void *data, struct request *rq, 2691 unsigned int hctx_idx, unsigned int request_idx, 2692 unsigned int numa_node) 2693 { 2694 struct mapped_device *md = data; 2695 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2696 2697 /* 2698 * Must initialize md member of tio, otherwise it won't 2699 * be available in dm_mq_queue_rq. 
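 *
 * The per-request layout implied by the .cmd_size set up in
 * dm_mq_init_request_queue() below is roughly:
 *
 *	[ struct request | struct dm_rq_target_io | per_io_data_size bytes ]
 *	                   ^ blk_mq_rq_to_pdu(rq)   ^ tio + 1 (when init_tio_pdu)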
2700 */ 2701 tio->md = md; 2702 2703 if (md->init_tio_pdu) { 2704 /* target-specific per-io data is immediately after the tio */ 2705 tio->info.ptr = tio + 1; 2706 } 2707 2708 return 0; 2709 } 2710 2711 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 2712 const struct blk_mq_queue_data *bd) 2713 { 2714 struct request *rq = bd->rq; 2715 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2716 struct mapped_device *md = tio->md; 2717 struct dm_target *ti = md->immutable_target; 2718 2719 if (unlikely(!ti)) { 2720 int srcu_idx; 2721 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2722 2723 ti = dm_table_find_target(map, 0); 2724 dm_put_live_table(md, srcu_idx); 2725 } 2726 2727 if (ti->type->busy && ti->type->busy(ti)) 2728 return BLK_MQ_RQ_QUEUE_BUSY; 2729 2730 dm_start_request(md, rq); 2731 2732 /* Init tio using md established in .init_request */ 2733 init_tio(tio, rq, md); 2734 2735 /* 2736 * Establish tio->ti before queuing work (map_tio_request) 2737 * or making direct call to map_request(). 2738 */ 2739 tio->ti = ti; 2740 2741 /* Direct call is fine since .queue_rq allows allocations */ 2742 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { 2743 /* Undo dm_start_request() before requeuing */ 2744 rq_end_stats(md, rq); 2745 rq_completed(md, rq_data_dir(rq), false); 2746 return BLK_MQ_RQ_QUEUE_BUSY; 2747 } 2748 2749 return BLK_MQ_RQ_QUEUE_OK; 2750 } 2751 2752 static struct blk_mq_ops dm_mq_ops = { 2753 .queue_rq = dm_mq_queue_rq, 2754 .map_queue = blk_mq_map_queue, 2755 .complete = dm_softirq_done, 2756 .init_request = dm_mq_init_request, 2757 }; 2758 2759 static int dm_mq_init_request_queue(struct mapped_device *md, 2760 struct dm_target *immutable_tgt) 2761 { 2762 struct request_queue *q; 2763 int err; 2764 2765 if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { 2766 DMERR("request-based dm-mq may only be stacked on blk-mq device(s)"); 2767 return -EINVAL; 2768 } 2769 2770 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); 2771 if (!md->tag_set) 2772 return -ENOMEM; 2773 2774 md->tag_set->ops = &dm_mq_ops; 2775 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); 2776 md->tag_set->numa_node = md->numa_node_id; 2777 md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2778 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); 2779 md->tag_set->driver_data = md; 2780 2781 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); 2782 if (immutable_tgt && immutable_tgt->per_io_data_size) { 2783 /* any target-specific per-io data is immediately after the tio */ 2784 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; 2785 md->init_tio_pdu = true; 2786 } 2787 2788 err = blk_mq_alloc_tag_set(md->tag_set); 2789 if (err) 2790 goto out_kfree_tag_set; 2791 2792 q = blk_mq_init_allocated_queue(md->tag_set, md->queue); 2793 if (IS_ERR(q)) { 2794 err = PTR_ERR(q); 2795 goto out_tag_set; 2796 } 2797 dm_init_md_queue(md); 2798 2799 /* backfill 'mq' sysfs registration normally done in blk_register_queue */ 2800 blk_mq_register_disk(md->disk); 2801 2802 return 0; 2803 2804 out_tag_set: 2805 blk_mq_free_tag_set(md->tag_set); 2806 out_kfree_tag_set: 2807 kfree(md->tag_set); 2808 2809 return err; 2810 } 2811 2812 static unsigned filter_md_type(unsigned type, struct mapped_device *md) 2813 { 2814 if (type == DM_TYPE_BIO_BASED) 2815 return type; 2816 2817 return !md->use_blk_mq ? 
DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED; 2818 } 2819 2820 /* 2821 * Setup the DM device's queue based on md's type 2822 */ 2823 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2824 { 2825 int r; 2826 unsigned md_type = filter_md_type(dm_get_md_type(md), md); 2827 2828 switch (md_type) { 2829 case DM_TYPE_REQUEST_BASED: 2830 r = dm_old_init_request_queue(md); 2831 if (r) { 2832 DMERR("Cannot initialize queue for request-based mapped device"); 2833 return r; 2834 } 2835 break; 2836 case DM_TYPE_MQ_REQUEST_BASED: 2837 r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t)); 2838 if (r) { 2839 DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2840 return r; 2841 } 2842 break; 2843 case DM_TYPE_BIO_BASED: 2844 dm_init_normal_md_queue(md); 2845 blk_queue_make_request(md->queue, dm_make_request); 2846 /* 2847 * DM handles splitting bios as needed. Free the bio_split bioset 2848 * since it won't be used (saves 1 process per bio-based DM device). 2849 */ 2850 bioset_free(md->queue->bio_split); 2851 md->queue->bio_split = NULL; 2852 break; 2853 } 2854 2855 return 0; 2856 } 2857 2858 struct mapped_device *dm_get_md(dev_t dev) 2859 { 2860 struct mapped_device *md; 2861 unsigned minor = MINOR(dev); 2862 2863 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2864 return NULL; 2865 2866 spin_lock(&_minor_lock); 2867 2868 md = idr_find(&_minor_idr, minor); 2869 if (md) { 2870 if ((md == MINOR_ALLOCED || 2871 (MINOR(disk_devt(dm_disk(md))) != minor) || 2872 dm_deleting_md(md) || 2873 test_bit(DMF_FREEING, &md->flags))) { 2874 md = NULL; 2875 goto out; 2876 } 2877 dm_get(md); 2878 } 2879 2880 out: 2881 spin_unlock(&_minor_lock); 2882 2883 return md; 2884 } 2885 EXPORT_SYMBOL_GPL(dm_get_md); 2886 2887 void *dm_get_mdptr(struct mapped_device *md) 2888 { 2889 return md->interface_ptr; 2890 } 2891 2892 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2893 { 2894 md->interface_ptr = ptr; 2895 } 2896 2897 void dm_get(struct mapped_device *md) 2898 { 2899 atomic_inc(&md->holders); 2900 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2901 } 2902 2903 int dm_hold(struct mapped_device *md) 2904 { 2905 spin_lock(&_minor_lock); 2906 if (test_bit(DMF_FREEING, &md->flags)) { 2907 spin_unlock(&_minor_lock); 2908 return -EBUSY; 2909 } 2910 dm_get(md); 2911 spin_unlock(&_minor_lock); 2912 return 0; 2913 } 2914 EXPORT_SYMBOL_GPL(dm_hold); 2915 2916 const char *dm_device_name(struct mapped_device *md) 2917 { 2918 return md->name; 2919 } 2920 EXPORT_SYMBOL_GPL(dm_device_name); 2921 2922 static void __dm_destroy(struct mapped_device *md, bool wait) 2923 { 2924 struct dm_table *map; 2925 int srcu_idx; 2926 2927 might_sleep(); 2928 2929 spin_lock(&_minor_lock); 2930 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2931 set_bit(DMF_FREEING, &md->flags); 2932 spin_unlock(&_minor_lock); 2933 2934 if (dm_request_based(md) && md->kworker_task) 2935 flush_kthread_worker(&md->kworker); 2936 2937 /* 2938 * Take suspend_lock so that presuspend and postsuspend methods 2939 * do not race with internal suspend. 
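 *
 * In outline, the teardown below is: mark DMF_FREEING under _minor_lock
 * (so dm_get_md() and dm_hold() start failing), flush the request-based
 * kworker, run presuspend/postsuspend unless already suspended, wait for
 * the remaining md->holders to drop (or just warn when wait is not
 * requested), then __unbind() and free_dev().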
2940 */ 2941 mutex_lock(&md->suspend_lock); 2942 map = dm_get_live_table(md, &srcu_idx); 2943 if (!dm_suspended_md(md)) { 2944 dm_table_presuspend_targets(map); 2945 dm_table_postsuspend_targets(map); 2946 } 2947 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2948 dm_put_live_table(md, srcu_idx); 2949 mutex_unlock(&md->suspend_lock); 2950 2951 /* 2952 * Rare, but there may be I/O requests still going to complete, 2953 * for example. Wait for all references to disappear. 2954 * No one should increment the reference count of the mapped_device, 2955 * after the mapped_device state becomes DMF_FREEING. 2956 */ 2957 if (wait) 2958 while (atomic_read(&md->holders)) 2959 msleep(1); 2960 else if (atomic_read(&md->holders)) 2961 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2962 dm_device_name(md), atomic_read(&md->holders)); 2963 2964 dm_sysfs_exit(md); 2965 dm_table_destroy(__unbind(md)); 2966 free_dev(md); 2967 } 2968 2969 void dm_destroy(struct mapped_device *md) 2970 { 2971 __dm_destroy(md, true); 2972 } 2973 2974 void dm_destroy_immediate(struct mapped_device *md) 2975 { 2976 __dm_destroy(md, false); 2977 } 2978 2979 void dm_put(struct mapped_device *md) 2980 { 2981 atomic_dec(&md->holders); 2982 } 2983 EXPORT_SYMBOL_GPL(dm_put); 2984 2985 static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 2986 { 2987 int r = 0; 2988 DECLARE_WAITQUEUE(wait, current); 2989 2990 add_wait_queue(&md->wait, &wait); 2991 2992 while (1) { 2993 set_current_state(interruptible); 2994 2995 if (!md_in_flight(md)) 2996 break; 2997 2998 if (interruptible == TASK_INTERRUPTIBLE && 2999 signal_pending(current)) { 3000 r = -EINTR; 3001 break; 3002 } 3003 3004 io_schedule(); 3005 } 3006 set_current_state(TASK_RUNNING); 3007 3008 remove_wait_queue(&md->wait, &wait); 3009 3010 return r; 3011 } 3012 3013 /* 3014 * Process the deferred bios 3015 */ 3016 static void dm_wq_work(struct work_struct *work) 3017 { 3018 struct mapped_device *md = container_of(work, struct mapped_device, 3019 work); 3020 struct bio *c; 3021 int srcu_idx; 3022 struct dm_table *map; 3023 3024 map = dm_get_live_table(md, &srcu_idx); 3025 3026 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 3027 spin_lock_irq(&md->deferred_lock); 3028 c = bio_list_pop(&md->deferred); 3029 spin_unlock_irq(&md->deferred_lock); 3030 3031 if (!c) 3032 break; 3033 3034 if (dm_request_based(md)) 3035 generic_make_request(c); 3036 else 3037 __split_and_process_bio(md, map, c); 3038 } 3039 3040 dm_put_live_table(md, srcu_idx); 3041 } 3042 3043 static void dm_queue_flush(struct mapped_device *md) 3044 { 3045 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3046 smp_mb__after_atomic(); 3047 queue_work(md->wq, &md->work); 3048 } 3049 3050 /* 3051 * Swap in a new table, returning the old one for the caller to destroy. 3052 */ 3053 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 3054 { 3055 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 3056 struct queue_limits limits; 3057 int r; 3058 3059 mutex_lock(&md->suspend_lock); 3060 3061 /* device must be suspended */ 3062 if (!dm_suspended_md(md)) 3063 goto out; 3064 3065 /* 3066 * If the new table has no data devices, retain the existing limits. 3067 * This helps multipath with queue_if_no_path if all paths disappear, 3068 * then new I/O is queued based on these limits, and then some paths 3069 * reappear. 
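 *
 * A concrete example of that case: a multipath device set to
 * queue_if_no_path loses its last path and userspace loads a path-less
 * table; that table has no data devices, so the previous live table's
 * limits are retained and the queued I/O stays sized for the paths that
 * will eventually return.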
3070 */ 3071 if (dm_table_has_no_data_devices(table)) { 3072 live_map = dm_get_live_table_fast(md); 3073 if (live_map) 3074 limits = md->queue->limits; 3075 dm_put_live_table_fast(md); 3076 } 3077 3078 if (!live_map) { 3079 r = dm_calculate_queue_limits(table, &limits); 3080 if (r) { 3081 map = ERR_PTR(r); 3082 goto out; 3083 } 3084 } 3085 3086 map = __bind(md, table, &limits); 3087 3088 out: 3089 mutex_unlock(&md->suspend_lock); 3090 return map; 3091 } 3092 3093 /* 3094 * Functions to lock and unlock any filesystem running on the 3095 * device. 3096 */ 3097 static int lock_fs(struct mapped_device *md) 3098 { 3099 int r; 3100 3101 WARN_ON(md->frozen_sb); 3102 3103 md->frozen_sb = freeze_bdev(md->bdev); 3104 if (IS_ERR(md->frozen_sb)) { 3105 r = PTR_ERR(md->frozen_sb); 3106 md->frozen_sb = NULL; 3107 return r; 3108 } 3109 3110 set_bit(DMF_FROZEN, &md->flags); 3111 3112 return 0; 3113 } 3114 3115 static void unlock_fs(struct mapped_device *md) 3116 { 3117 if (!test_bit(DMF_FROZEN, &md->flags)) 3118 return; 3119 3120 thaw_bdev(md->bdev, md->frozen_sb); 3121 md->frozen_sb = NULL; 3122 clear_bit(DMF_FROZEN, &md->flags); 3123 } 3124 3125 /* 3126 * If __dm_suspend returns 0, the device is completely quiescent 3127 * now. There is no request-processing activity. All new requests 3128 * are being added to md->deferred list. 3129 * 3130 * Caller must hold md->suspend_lock 3131 */ 3132 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 3133 unsigned suspend_flags, int interruptible) 3134 { 3135 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 3136 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 3137 int r; 3138 3139 /* 3140 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 3141 * This flag is cleared before dm_suspend returns. 3142 */ 3143 if (noflush) 3144 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 3145 3146 /* 3147 * This gets reverted if there's an error later and the targets 3148 * provide the .presuspend_undo hook. 3149 */ 3150 dm_table_presuspend_targets(map); 3151 3152 /* 3153 * Flush I/O to the device. 3154 * Any I/O submitted after lock_fs() may not be flushed. 3155 * noflush takes precedence over do_lockfs. 3156 * (lock_fs() flushes I/Os and waits for them to complete.) 3157 */ 3158 if (!noflush && do_lockfs) { 3159 r = lock_fs(md); 3160 if (r) { 3161 dm_table_presuspend_undo_targets(map); 3162 return r; 3163 } 3164 } 3165 3166 /* 3167 * Here we must make sure that no processes are submitting requests 3168 * to target drivers i.e. no one may be executing 3169 * __split_and_process_bio. This is called from dm_request and 3170 * dm_wq_work. 3171 * 3172 * To get all processes out of __split_and_process_bio in dm_request, 3173 * we take the write lock. To prevent any process from reentering 3174 * __split_and_process_bio from dm_request and quiesce the thread 3175 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 3176 * flush_workqueue(md->wq). 3177 */ 3178 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3179 if (map) 3180 synchronize_srcu(&md->io_barrier); 3181 3182 /* 3183 * Stop md->queue before flushing md->wq in case request-based 3184 * dm defers requests to md->wq from md->queue. 3185 */ 3186 if (dm_request_based(md)) { 3187 dm_stop_queue(md->queue); 3188 if (md->kworker_task) 3189 flush_kthread_worker(&md->kworker); 3190 } 3191 3192 flush_workqueue(md->wq); 3193 3194 /* 3195 * At this point no more requests are entering target request routines. 3196 * We call dm_wait_for_completion to wait for all existing requests 3197 * to finish.
3198 */ 3199 r = dm_wait_for_completion(md, interruptible); 3200 3201 if (noflush) 3202 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 3203 if (map) 3204 synchronize_srcu(&md->io_barrier); 3205 3206 /* were we interrupted ? */ 3207 if (r < 0) { 3208 dm_queue_flush(md); 3209 3210 if (dm_request_based(md)) 3211 dm_start_queue(md->queue); 3212 3213 unlock_fs(md); 3214 dm_table_presuspend_undo_targets(map); 3215 /* pushback list is already flushed, so skip flush */ 3216 } 3217 3218 return r; 3219 } 3220 3221 /* 3222 * We need to be able to change a mapping table under a mounted 3223 * filesystem. For example we might want to move some data in 3224 * the background. Before the table can be swapped with 3225 * dm_bind_table, dm_suspend must be called to flush any in 3226 * flight bios and ensure that any further io gets deferred. 3227 */ 3228 /* 3229 * Suspend mechanism in request-based dm. 3230 * 3231 * 1. Flush all I/Os by lock_fs() if needed. 3232 * 2. Stop dispatching any I/O by stopping the request_queue. 3233 * 3. Wait for all in-flight I/Os to be completed or requeued. 3234 * 3235 * To abort suspend, start the request_queue. 3236 */ 3237 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 3238 { 3239 struct dm_table *map = NULL; 3240 int r = 0; 3241 3242 retry: 3243 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3244 3245 if (dm_suspended_md(md)) { 3246 r = -EINVAL; 3247 goto out_unlock; 3248 } 3249 3250 if (dm_suspended_internally_md(md)) { 3251 /* already internally suspended, wait for internal resume */ 3252 mutex_unlock(&md->suspend_lock); 3253 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3254 if (r) 3255 return r; 3256 goto retry; 3257 } 3258 3259 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3260 3261 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); 3262 if (r) 3263 goto out_unlock; 3264 3265 set_bit(DMF_SUSPENDED, &md->flags); 3266 3267 dm_table_postsuspend_targets(map); 3268 3269 out_unlock: 3270 mutex_unlock(&md->suspend_lock); 3271 return r; 3272 } 3273 3274 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 3275 { 3276 if (map) { 3277 int r = dm_table_resume_targets(map); 3278 if (r) 3279 return r; 3280 } 3281 3282 dm_queue_flush(md); 3283 3284 /* 3285 * Flushing deferred I/Os must be done after targets are resumed 3286 * so that mapping of targets can work correctly. 3287 * Request-based dm is queueing the deferred I/Os in its request_queue. 
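 *
 * In outline, resume therefore runs: dm_table_resume_targets(), then
 * dm_queue_flush() to replay the bio-based deferred list, then
 * dm_start_queue() for request-based devices, and finally unlock_fs().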
3288 */ 3289 if (dm_request_based(md)) 3290 dm_start_queue(md->queue); 3291 3292 unlock_fs(md); 3293 3294 return 0; 3295 } 3296 3297 int dm_resume(struct mapped_device *md) 3298 { 3299 int r = -EINVAL; 3300 struct dm_table *map = NULL; 3301 3302 retry: 3303 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3304 3305 if (!dm_suspended_md(md)) 3306 goto out; 3307 3308 if (dm_suspended_internally_md(md)) { 3309 /* already internally suspended, wait for internal resume */ 3310 mutex_unlock(&md->suspend_lock); 3311 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3312 if (r) 3313 return r; 3314 goto retry; 3315 } 3316 3317 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3318 if (!map || !dm_table_get_size(map)) 3319 goto out; 3320 3321 r = __dm_resume(md, map); 3322 if (r) 3323 goto out; 3324 3325 clear_bit(DMF_SUSPENDED, &md->flags); 3326 3327 r = 0; 3328 out: 3329 mutex_unlock(&md->suspend_lock); 3330 3331 return r; 3332 } 3333 3334 /* 3335 * Internal suspend/resume works like userspace-driven suspend. It waits 3336 * until all bios finish and prevents issuing new bios to the target drivers. 3337 * It may be used only from the kernel. 3338 */ 3339 3340 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 3341 { 3342 struct dm_table *map = NULL; 3343 3344 if (md->internal_suspend_count++) 3345 return; /* nested internal suspend */ 3346 3347 if (dm_suspended_md(md)) { 3348 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3349 return; /* nest suspend */ 3350 } 3351 3352 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3353 3354 /* 3355 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3356 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3357 * would require changing .presuspend to return an error -- avoid this 3358 * until there is a need for more elaborate variants of internal suspend. 3359 */ 3360 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); 3361 3362 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3363 3364 dm_table_postsuspend_targets(map); 3365 } 3366 3367 static void __dm_internal_resume(struct mapped_device *md) 3368 { 3369 BUG_ON(!md->internal_suspend_count); 3370 3371 if (--md->internal_suspend_count) 3372 return; /* resume from nested internal suspend */ 3373 3374 if (dm_suspended_md(md)) 3375 goto done; /* resume from nested suspend */ 3376 3377 /* 3378 * NOTE: existing callers don't need to call dm_table_resume_targets 3379 * (which may fail -- so best to avoid it for now by passing NULL map) 3380 */ 3381 (void) __dm_resume(md, NULL); 3382 3383 done: 3384 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3385 smp_mb__after_atomic(); 3386 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3387 } 3388 3389 void dm_internal_suspend_noflush(struct mapped_device *md) 3390 { 3391 mutex_lock(&md->suspend_lock); 3392 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3393 mutex_unlock(&md->suspend_lock); 3394 } 3395 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3396 3397 void dm_internal_resume(struct mapped_device *md) 3398 { 3399 mutex_lock(&md->suspend_lock); 3400 __dm_internal_resume(md); 3401 mutex_unlock(&md->suspend_lock); 3402 } 3403 EXPORT_SYMBOL_GPL(dm_internal_resume); 3404 3405 /* 3406 * Fast variants of internal suspend/resume hold md->suspend_lock, 3407 * which prevents interaction with userspace-driven suspend. 
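 *
 * A minimal usage sketch (hypothetical caller; the lock taken in
 * dm_internal_suspend_fast() is only dropped by dm_internal_resume_fast(),
 * so the two calls must always be balanced):
 *
 *	dm_internal_suspend_fast(md);
 *	... poke at the quiesced device ...
 *	dm_internal_resume_fast(md);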
3408 */ 3409 3410 void dm_internal_suspend_fast(struct mapped_device *md) 3411 { 3412 mutex_lock(&md->suspend_lock); 3413 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3414 return; 3415 3416 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3417 synchronize_srcu(&md->io_barrier); 3418 flush_workqueue(md->wq); 3419 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3420 } 3421 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3422 3423 void dm_internal_resume_fast(struct mapped_device *md) 3424 { 3425 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3426 goto done; 3427 3428 dm_queue_flush(md); 3429 3430 done: 3431 mutex_unlock(&md->suspend_lock); 3432 } 3433 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3434 3435 /*----------------------------------------------------------------- 3436 * Event notification. 3437 *---------------------------------------------------------------*/ 3438 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 3439 unsigned cookie) 3440 { 3441 char udev_cookie[DM_COOKIE_LENGTH]; 3442 char *envp[] = { udev_cookie, NULL }; 3443 3444 if (!cookie) 3445 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 3446 else { 3447 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 3448 DM_COOKIE_ENV_VAR_NAME, cookie); 3449 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 3450 action, envp); 3451 } 3452 } 3453 3454 uint32_t dm_next_uevent_seq(struct mapped_device *md) 3455 { 3456 return atomic_add_return(1, &md->uevent_seq); 3457 } 3458 3459 uint32_t dm_get_event_nr(struct mapped_device *md) 3460 { 3461 return atomic_read(&md->event_nr); 3462 } 3463 3464 int dm_wait_event(struct mapped_device *md, int event_nr) 3465 { 3466 return wait_event_interruptible(md->eventq, 3467 (event_nr != atomic_read(&md->event_nr))); 3468 } 3469 3470 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 3471 { 3472 unsigned long flags; 3473 3474 spin_lock_irqsave(&md->uevent_lock, flags); 3475 list_add(elist, &md->uevent_list); 3476 spin_unlock_irqrestore(&md->uevent_lock, flags); 3477 } 3478 3479 /* 3480 * The gendisk is only valid as long as you have a reference 3481 * count on 'md'. 
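 *
 * A rough usage sketch for a sysfs-style caller (hypothetical):
 *
 *	md = dm_get_from_kobject(kobj);    - takes a reference, or returns NULL
 *	if (md) {
 *		... use dm_disk(md) ...
 *		dm_put(md);
 *	}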
3482 */ 3483 struct gendisk *dm_disk(struct mapped_device *md) 3484 { 3485 return md->disk; 3486 } 3487 EXPORT_SYMBOL_GPL(dm_disk); 3488 3489 struct kobject *dm_kobject(struct mapped_device *md) 3490 { 3491 return &md->kobj_holder.kobj; 3492 } 3493 3494 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3495 { 3496 struct mapped_device *md; 3497 3498 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3499 3500 if (test_bit(DMF_FREEING, &md->flags) || 3501 dm_deleting_md(md)) 3502 return NULL; 3503 3504 dm_get(md); 3505 return md; 3506 } 3507 3508 int dm_suspended_md(struct mapped_device *md) 3509 { 3510 return test_bit(DMF_SUSPENDED, &md->flags); 3511 } 3512 3513 int dm_suspended_internally_md(struct mapped_device *md) 3514 { 3515 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3516 } 3517 3518 int dm_test_deferred_remove_flag(struct mapped_device *md) 3519 { 3520 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 3521 } 3522 3523 int dm_suspended(struct dm_target *ti) 3524 { 3525 return dm_suspended_md(dm_table_get_md(ti->table)); 3526 } 3527 EXPORT_SYMBOL_GPL(dm_suspended); 3528 3529 int dm_noflush_suspending(struct dm_target *ti) 3530 { 3531 return __noflush_suspending(dm_table_get_md(ti->table)); 3532 } 3533 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 3534 3535 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, 3536 unsigned integrity, unsigned per_io_data_size) 3537 { 3538 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 3539 struct kmem_cache *cachep = NULL; 3540 unsigned int pool_size = 0; 3541 unsigned int front_pad; 3542 3543 if (!pools) 3544 return NULL; 3545 3546 type = filter_md_type(type, md); 3547 3548 switch (type) { 3549 case DM_TYPE_BIO_BASED: 3550 cachep = _io_cache; 3551 pool_size = dm_get_reserved_bio_based_ios(); 3552 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 3553 break; 3554 case DM_TYPE_REQUEST_BASED: 3555 cachep = _rq_tio_cache; 3556 pool_size = dm_get_reserved_rq_based_ios(); 3557 pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); 3558 if (!pools->rq_pool) 3559 goto out; 3560 /* fall through to setup remaining rq-based pools */ 3561 case DM_TYPE_MQ_REQUEST_BASED: 3562 if (!pool_size) 3563 pool_size = dm_get_reserved_rq_based_ios(); 3564 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 3565 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 3566 break; 3567 default: 3568 BUG(); 3569 } 3570 3571 if (cachep) { 3572 pools->io_pool = mempool_create_slab_pool(pool_size, cachep); 3573 if (!pools->io_pool) 3574 goto out; 3575 } 3576 3577 pools->bs = bioset_create_nobvec(pool_size, front_pad); 3578 if (!pools->bs) 3579 goto out; 3580 3581 if (integrity && bioset_integrity_create(pools->bs, pool_size)) 3582 goto out; 3583 3584 return pools; 3585 3586 out: 3587 dm_free_md_mempools(pools); 3588 3589 return NULL; 3590 } 3591 3592 void dm_free_md_mempools(struct dm_md_mempools *pools) 3593 { 3594 if (!pools) 3595 return; 3596 3597 mempool_destroy(pools->io_pool); 3598 mempool_destroy(pools->rq_pool); 3599 3600 if (pools->bs) 3601 bioset_free(pools->bs); 3602 3603 kfree(pools); 3604 } 3605 3606 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3607 u32 flags) 3608 { 3609 struct mapped_device *md = bdev->bd_disk->private_data; 3610 const struct pr_ops *ops; 3611 fmode_t mode; 3612 int r; 3613 3614 r = dm_grab_bdev_for_ioctl(md, &bdev, 
&mode); 3615 if (r < 0) 3616 return r; 3617 3618 ops = bdev->bd_disk->fops->pr_ops; 3619 if (ops && ops->pr_register) 3620 r = ops->pr_register(bdev, old_key, new_key, flags); 3621 else 3622 r = -EOPNOTSUPP; 3623 3624 bdput(bdev); 3625 return r; 3626 } 3627 3628 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3629 u32 flags) 3630 { 3631 struct mapped_device *md = bdev->bd_disk->private_data; 3632 const struct pr_ops *ops; 3633 fmode_t mode; 3634 int r; 3635 3636 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3637 if (r < 0) 3638 return r; 3639 3640 ops = bdev->bd_disk->fops->pr_ops; 3641 if (ops && ops->pr_reserve) 3642 r = ops->pr_reserve(bdev, key, type, flags); 3643 else 3644 r = -EOPNOTSUPP; 3645 3646 bdput(bdev); 3647 return r; 3648 } 3649 3650 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3651 { 3652 struct mapped_device *md = bdev->bd_disk->private_data; 3653 const struct pr_ops *ops; 3654 fmode_t mode; 3655 int r; 3656 3657 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3658 if (r < 0) 3659 return r; 3660 3661 ops = bdev->bd_disk->fops->pr_ops; 3662 if (ops && ops->pr_release) 3663 r = ops->pr_release(bdev, key, type); 3664 else 3665 r = -EOPNOTSUPP; 3666 3667 bdput(bdev); 3668 return r; 3669 } 3670 3671 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3672 enum pr_type type, bool abort) 3673 { 3674 struct mapped_device *md = bdev->bd_disk->private_data; 3675 const struct pr_ops *ops; 3676 fmode_t mode; 3677 int r; 3678 3679 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3680 if (r < 0) 3681 return r; 3682 3683 ops = bdev->bd_disk->fops->pr_ops; 3684 if (ops && ops->pr_preempt) 3685 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 3686 else 3687 r = -EOPNOTSUPP; 3688 3689 bdput(bdev); 3690 return r; 3691 } 3692 3693 static int dm_pr_clear(struct block_device *bdev, u64 key) 3694 { 3695 struct mapped_device *md = bdev->bd_disk->private_data; 3696 const struct pr_ops *ops; 3697 fmode_t mode; 3698 int r; 3699 3700 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3701 if (r < 0) 3702 return r; 3703 3704 ops = bdev->bd_disk->fops->pr_ops; 3705 if (ops && ops->pr_clear) 3706 r = ops->pr_clear(bdev, key); 3707 else 3708 r = -EOPNOTSUPP; 3709 3710 bdput(bdev); 3711 return r; 3712 } 3713 3714 static const struct pr_ops dm_pr_ops = { 3715 .pr_register = dm_pr_register, 3716 .pr_reserve = dm_pr_reserve, 3717 .pr_release = dm_pr_release, 3718 .pr_preempt = dm_pr_preempt, 3719 .pr_clear = dm_pr_clear, 3720 }; 3721 3722 static const struct block_device_operations dm_blk_dops = { 3723 .open = dm_blk_open, 3724 .release = dm_blk_close, 3725 .ioctl = dm_blk_ioctl, 3726 .getgeo = dm_blk_getgeo, 3727 .pr_ops = &dm_pr_ops, 3728 .owner = THIS_MODULE 3729 }; 3730 3731 /* 3732 * module hooks 3733 */ 3734 module_init(dm_init); 3735 module_exit(dm_exit); 3736 3737 module_param(major, uint, 0); 3738 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3739 3740 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3741 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3742 3743 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); 3744 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); 3745 3746 module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR); 3747 MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices"); 3748 3749 module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR); 3750 
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices"); 3751 3752 module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR); 3753 MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices"); 3754 3755 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3756 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3757 3758 MODULE_DESCRIPTION(DM_NAME " driver"); 3759 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3760 MODULE_LICENSE("GPL"); 3761
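
/*
 * Administration notes (informal sketch; the module name and paths below are
 * assumptions, not taken from this file): when device-mapper is built as a
 * module it normally loads as dm_mod, so the writable parameters above are
 * expected to appear under /sys/module/dm_mod/parameters/, e.g.
 *
 *	echo 512 > /sys/module/dm_mod/parameters/reserved_rq_based_ios
 *	echo Y   > /sys/module/dm_mod/parameters/use_blk_mq
 *
 * "major" is registered with permission 0, so it has no sysfs entry and can
 * only be given at load time (e.g. "modprobe dm_mod major=<n>").
 */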