1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm.h" 9 #include "dm-uevent.h" 10 11 #include <linux/init.h> 12 #include <linux/module.h> 13 #include <linux/mutex.h> 14 #include <linux/moduleparam.h> 15 #include <linux/blkpg.h> 16 #include <linux/bio.h> 17 #include <linux/mempool.h> 18 #include <linux/slab.h> 19 #include <linux/idr.h> 20 #include <linux/hdreg.h> 21 #include <linux/delay.h> 22 #include <linux/wait.h> 23 #include <linux/kthread.h> 24 #include <linux/ktime.h> 25 #include <linux/elevator.h> /* for rq_end_sector() */ 26 #include <linux/blk-mq.h> 27 #include <linux/pr.h> 28 29 #include <trace/events/block.h> 30 31 #define DM_MSG_PREFIX "core" 32 33 #ifdef CONFIG_PRINTK 34 /* 35 * ratelimit state to be used in DMXXX_LIMIT(). 36 */ 37 DEFINE_RATELIMIT_STATE(dm_ratelimit_state, 38 DEFAULT_RATELIMIT_INTERVAL, 39 DEFAULT_RATELIMIT_BURST); 40 EXPORT_SYMBOL(dm_ratelimit_state); 41 #endif 42 43 /* 44 * Cookies are numeric values sent with CHANGE and REMOVE 45 * uevents while resuming, removing or renaming the device. 46 */ 47 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 48 #define DM_COOKIE_LENGTH 24 49 50 static const char *_name = DM_NAME; 51 52 static unsigned int major = 0; 53 static unsigned int _major = 0; 54 55 static DEFINE_IDR(_minor_idr); 56 57 static DEFINE_SPINLOCK(_minor_lock); 58 59 static void do_deferred_remove(struct work_struct *w); 60 61 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 62 63 static struct workqueue_struct *deferred_remove_workqueue; 64 65 /* 66 * For bio-based dm. 67 * One of these is allocated per bio. 68 */ 69 struct dm_io { 70 struct mapped_device *md; 71 int error; 72 atomic_t io_count; 73 struct bio *bio; 74 unsigned long start_time; 75 spinlock_t endio_lock; 76 struct dm_stats_aux stats_aux; 77 }; 78 79 /* 80 * For request-based dm. 81 * One of these is allocated per request. 82 */ 83 struct dm_rq_target_io { 84 struct mapped_device *md; 85 struct dm_target *ti; 86 struct request *orig, *clone; 87 struct kthread_work work; 88 int error; 89 union map_info info; 90 struct dm_stats_aux stats_aux; 91 unsigned long duration_jiffies; 92 unsigned n_sectors; 93 }; 94 95 /* 96 * For request-based dm - the bio clones we allocate are embedded in these 97 * structs. 98 * 99 * We allocate these with bio_alloc_bioset, using the front_pad parameter when 100 * the bioset is created - this means the bio has to come at the end of the 101 * struct. 102 */ 103 struct dm_rq_clone_bio_info { 104 struct bio *orig; 105 struct dm_rq_target_io *tio; 106 struct bio clone; 107 }; 108 109 #define MINOR_ALLOCED ((void *)-1) 110 111 /* 112 * Bits for the md->flags field. 113 */ 114 #define DMF_BLOCK_IO_FOR_SUSPEND 0 115 #define DMF_SUSPENDED 1 116 #define DMF_FROZEN 2 117 #define DMF_FREEING 3 118 #define DMF_DELETING 4 119 #define DMF_NOFLUSH_SUSPENDING 5 120 #define DMF_DEFERRED_REMOVE 6 121 #define DMF_SUSPENDED_INTERNALLY 7 122 123 /* 124 * Work processed by per-device workqueue. 125 */ 126 struct mapped_device { 127 struct srcu_struct io_barrier; 128 struct mutex suspend_lock; 129 130 /* 131 * The current mapping (struct dm_table *). 132 * Use dm_get_live_table{_fast} or take suspend_lock for 133 * dereference. 
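 *
 * For instance (an illustrative sketch, not part of the original file),
 * a reader that must not block can use the _fast variant:
 *
 *	struct dm_table *map = dm_get_live_table_fast(md);
 *
 *	if (map)
 *		... inspect the table, without blocking ...
 *	dm_put_live_table_fast(md);
 *
 * Blocking readers use dm_get_live_table()/dm_put_live_table() instead;
 * see the comment above dm_get_live_table() further down.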
134 */ 135 void __rcu *map; 136 137 struct list_head table_devices; 138 struct mutex table_devices_lock; 139 140 unsigned long flags; 141 142 struct request_queue *queue; 143 int numa_node_id; 144 145 unsigned type; 146 /* Protect queue and type against concurrent access. */ 147 struct mutex type_lock; 148 149 atomic_t holders; 150 atomic_t open_count; 151 152 struct dm_target *immutable_target; 153 struct target_type *immutable_target_type; 154 155 struct gendisk *disk; 156 char name[16]; 157 158 void *interface_ptr; 159 160 /* 161 * A list of ios that arrived while we were suspended. 162 */ 163 atomic_t pending[2]; 164 wait_queue_head_t wait; 165 struct work_struct work; 166 spinlock_t deferred_lock; 167 struct bio_list deferred; 168 169 /* 170 * Event handling. 171 */ 172 wait_queue_head_t eventq; 173 atomic_t event_nr; 174 atomic_t uevent_seq; 175 struct list_head uevent_list; 176 spinlock_t uevent_lock; /* Protect access to uevent_list */ 177 178 /* the number of internal suspends */ 179 unsigned internal_suspend_count; 180 181 /* 182 * Processing queue (flush) 183 */ 184 struct workqueue_struct *wq; 185 186 /* 187 * io objects are allocated from here. 188 */ 189 mempool_t *io_pool; 190 mempool_t *rq_pool; 191 192 struct bio_set *bs; 193 194 /* 195 * freeze/thaw support require holding onto a super block 196 */ 197 struct super_block *frozen_sb; 198 199 /* forced geometry settings */ 200 struct hd_geometry geometry; 201 202 struct block_device *bdev; 203 204 /* kobject and completion */ 205 struct dm_kobject_holder kobj_holder; 206 207 /* zero-length flush that will be cloned and submitted to targets */ 208 struct bio flush_bio; 209 210 struct dm_stats stats; 211 212 struct kthread_worker kworker; 213 struct task_struct *kworker_task; 214 215 /* for request-based merge heuristic in dm_request_fn() */ 216 unsigned seq_rq_merge_deadline_usecs; 217 int last_rq_rw; 218 sector_t last_rq_pos; 219 ktime_t last_rq_start_time; 220 221 /* for blk-mq request-based DM support */ 222 struct blk_mq_tag_set *tag_set; 223 bool use_blk_mq:1; 224 bool init_tio_pdu:1; 225 }; 226 227 #ifdef CONFIG_DM_MQ_DEFAULT 228 static bool use_blk_mq = true; 229 #else 230 static bool use_blk_mq = false; 231 #endif 232 233 #define DM_MQ_NR_HW_QUEUES 1 234 #define DM_MQ_QUEUE_DEPTH 2048 235 #define DM_NUMA_NODE NUMA_NO_NODE 236 237 static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES; 238 static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH; 239 static int dm_numa_node = DM_NUMA_NODE; 240 241 bool dm_use_blk_mq(struct mapped_device *md) 242 { 243 return md->use_blk_mq; 244 } 245 EXPORT_SYMBOL_GPL(dm_use_blk_mq); 246 247 /* 248 * For mempools pre-allocation at the table loading time. 249 */ 250 struct dm_md_mempools { 251 mempool_t *io_pool; 252 mempool_t *rq_pool; 253 struct bio_set *bs; 254 }; 255 256 struct table_device { 257 struct list_head list; 258 atomic_t count; 259 struct dm_dev dm_dev; 260 }; 261 262 #define RESERVED_BIO_BASED_IOS 16 263 #define RESERVED_REQUEST_BASED_IOS 256 264 #define RESERVED_MAX_IOS 1024 265 static struct kmem_cache *_io_cache; 266 static struct kmem_cache *_rq_tio_cache; 267 static struct kmem_cache *_rq_cache; 268 269 /* 270 * Bio-based DM's mempools' reserved IOs set by the user. 271 */ 272 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 273 274 /* 275 * Request-based DM's mempools' reserved IOs set by the user. 
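 *
 * The stored value is never used directly; dm_get_reserved_rq_based_ios()
 * below clamps it, so (for example) a value of 0 falls back to the default
 * RESERVED_REQUEST_BASED_IOS (256), and anything above RESERVED_MAX_IOS
 * (1024) is capped at 1024.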
276 */ 277 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; 278 279 static int __dm_get_module_param_int(int *module_param, int min, int max) 280 { 281 int param = ACCESS_ONCE(*module_param); 282 int modified_param = 0; 283 bool modified = true; 284 285 if (param < min) 286 modified_param = min; 287 else if (param > max) 288 modified_param = max; 289 else 290 modified = false; 291 292 if (modified) { 293 (void)cmpxchg(module_param, param, modified_param); 294 param = modified_param; 295 } 296 297 return param; 298 } 299 300 static unsigned __dm_get_module_param(unsigned *module_param, 301 unsigned def, unsigned max) 302 { 303 unsigned param = ACCESS_ONCE(*module_param); 304 unsigned modified_param = 0; 305 306 if (!param) 307 modified_param = def; 308 else if (param > max) 309 modified_param = max; 310 311 if (modified_param) { 312 (void)cmpxchg(module_param, param, modified_param); 313 param = modified_param; 314 } 315 316 return param; 317 } 318 319 unsigned dm_get_reserved_bio_based_ios(void) 320 { 321 return __dm_get_module_param(&reserved_bio_based_ios, 322 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); 323 } 324 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 325 326 unsigned dm_get_reserved_rq_based_ios(void) 327 { 328 return __dm_get_module_param(&reserved_rq_based_ios, 329 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); 330 } 331 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); 332 333 static unsigned dm_get_blk_mq_nr_hw_queues(void) 334 { 335 return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32); 336 } 337 338 static unsigned dm_get_blk_mq_queue_depth(void) 339 { 340 return __dm_get_module_param(&dm_mq_queue_depth, 341 DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH); 342 } 343 344 static unsigned dm_get_numa_node(void) 345 { 346 return __dm_get_module_param_int(&dm_numa_node, 347 DM_NUMA_NODE, num_online_nodes() - 1); 348 } 349 350 static int __init local_init(void) 351 { 352 int r = -ENOMEM; 353 354 /* allocate a slab for the dm_ios */ 355 _io_cache = KMEM_CACHE(dm_io, 0); 356 if (!_io_cache) 357 return r; 358 359 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 360 if (!_rq_tio_cache) 361 goto out_free_io_cache; 362 363 _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), 364 __alignof__(struct request), 0, NULL); 365 if (!_rq_cache) 366 goto out_free_rq_tio_cache; 367 368 r = dm_uevent_init(); 369 if (r) 370 goto out_free_rq_cache; 371 372 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 373 if (!deferred_remove_workqueue) { 374 r = -ENOMEM; 375 goto out_uevent_exit; 376 } 377 378 _major = major; 379 r = register_blkdev(_major, _name); 380 if (r < 0) 381 goto out_free_workqueue; 382 383 if (!_major) 384 _major = r; 385 386 return 0; 387 388 out_free_workqueue: 389 destroy_workqueue(deferred_remove_workqueue); 390 out_uevent_exit: 391 dm_uevent_exit(); 392 out_free_rq_cache: 393 kmem_cache_destroy(_rq_cache); 394 out_free_rq_tio_cache: 395 kmem_cache_destroy(_rq_tio_cache); 396 out_free_io_cache: 397 kmem_cache_destroy(_io_cache); 398 399 return r; 400 } 401 402 static void local_exit(void) 403 { 404 flush_scheduled_work(); 405 destroy_workqueue(deferred_remove_workqueue); 406 407 kmem_cache_destroy(_rq_cache); 408 kmem_cache_destroy(_rq_tio_cache); 409 kmem_cache_destroy(_io_cache); 410 unregister_blkdev(_major, _name); 411 dm_uevent_exit(); 412 413 _major = 0; 414 415 DMINFO("cleaned up"); 416 } 417 418 static int (*_inits[])(void) __initdata = { 419 local_init, 420 dm_target_init, 421 dm_linear_init, 422 dm_stripe_init, 
423 dm_io_init, 424 dm_kcopyd_init, 425 dm_interface_init, 426 dm_statistics_init, 427 }; 428 429 static void (*_exits[])(void) = { 430 local_exit, 431 dm_target_exit, 432 dm_linear_exit, 433 dm_stripe_exit, 434 dm_io_exit, 435 dm_kcopyd_exit, 436 dm_interface_exit, 437 dm_statistics_exit, 438 }; 439 440 static int __init dm_init(void) 441 { 442 const int count = ARRAY_SIZE(_inits); 443 444 int r, i; 445 446 for (i = 0; i < count; i++) { 447 r = _inits[i](); 448 if (r) 449 goto bad; 450 } 451 452 return 0; 453 454 bad: 455 while (i--) 456 _exits[i](); 457 458 return r; 459 } 460 461 static void __exit dm_exit(void) 462 { 463 int i = ARRAY_SIZE(_exits); 464 465 while (i--) 466 _exits[i](); 467 468 /* 469 * Should be empty by this point. 470 */ 471 idr_destroy(&_minor_idr); 472 } 473 474 /* 475 * Block device functions 476 */ 477 int dm_deleting_md(struct mapped_device *md) 478 { 479 return test_bit(DMF_DELETING, &md->flags); 480 } 481 482 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 483 { 484 struct mapped_device *md; 485 486 spin_lock(&_minor_lock); 487 488 md = bdev->bd_disk->private_data; 489 if (!md) 490 goto out; 491 492 if (test_bit(DMF_FREEING, &md->flags) || 493 dm_deleting_md(md)) { 494 md = NULL; 495 goto out; 496 } 497 498 dm_get(md); 499 atomic_inc(&md->open_count); 500 out: 501 spin_unlock(&_minor_lock); 502 503 return md ? 0 : -ENXIO; 504 } 505 506 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 507 { 508 struct mapped_device *md; 509 510 spin_lock(&_minor_lock); 511 512 md = disk->private_data; 513 if (WARN_ON(!md)) 514 goto out; 515 516 if (atomic_dec_and_test(&md->open_count) && 517 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 518 queue_work(deferred_remove_workqueue, &deferred_remove_work); 519 520 dm_put(md); 521 out: 522 spin_unlock(&_minor_lock); 523 } 524 525 int dm_open_count(struct mapped_device *md) 526 { 527 return atomic_read(&md->open_count); 528 } 529 530 /* 531 * Guarantees nothing is using the device before it's deleted. 
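 *
 * A summary of the possible outcomes (informal, derived from the code
 * below):
 *
 *	device still open, mark_deferred	-> -EBUSY, DMF_DEFERRED_REMOVE set
 *	device still open, !mark_deferred	-> -EBUSY
 *	closed, only_deferred, no deferred remove pending	-> -EEXIST
 *	otherwise				-> 0, DMF_DELETING set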
532 */ 533 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 534 { 535 int r = 0; 536 537 spin_lock(&_minor_lock); 538 539 if (dm_open_count(md)) { 540 r = -EBUSY; 541 if (mark_deferred) 542 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 543 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 544 r = -EEXIST; 545 else 546 set_bit(DMF_DELETING, &md->flags); 547 548 spin_unlock(&_minor_lock); 549 550 return r; 551 } 552 553 int dm_cancel_deferred_remove(struct mapped_device *md) 554 { 555 int r = 0; 556 557 spin_lock(&_minor_lock); 558 559 if (test_bit(DMF_DELETING, &md->flags)) 560 r = -EBUSY; 561 else 562 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 563 564 spin_unlock(&_minor_lock); 565 566 return r; 567 } 568 569 static void do_deferred_remove(struct work_struct *w) 570 { 571 dm_deferred_remove(); 572 } 573 574 sector_t dm_get_size(struct mapped_device *md) 575 { 576 return get_capacity(md->disk); 577 } 578 579 struct request_queue *dm_get_md_queue(struct mapped_device *md) 580 { 581 return md->queue; 582 } 583 584 struct dm_stats *dm_get_stats(struct mapped_device *md) 585 { 586 return &md->stats; 587 } 588 589 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 590 { 591 struct mapped_device *md = bdev->bd_disk->private_data; 592 593 return dm_get_geometry(md, geo); 594 } 595 596 static int dm_grab_bdev_for_ioctl(struct mapped_device *md, 597 struct block_device **bdev, 598 fmode_t *mode) 599 { 600 struct dm_target *tgt; 601 struct dm_table *map; 602 int srcu_idx, r; 603 604 retry: 605 r = -ENOTTY; 606 map = dm_get_live_table(md, &srcu_idx); 607 if (!map || !dm_table_get_size(map)) 608 goto out; 609 610 /* We only support devices that have a single target */ 611 if (dm_table_get_num_targets(map) != 1) 612 goto out; 613 614 tgt = dm_table_get_target(map, 0); 615 if (!tgt->type->prepare_ioctl) 616 goto out; 617 618 if (dm_suspended_md(md)) { 619 r = -EAGAIN; 620 goto out; 621 } 622 623 r = tgt->type->prepare_ioctl(tgt, bdev, mode); 624 if (r < 0) 625 goto out; 626 627 bdgrab(*bdev); 628 dm_put_live_table(md, srcu_idx); 629 return r; 630 631 out: 632 dm_put_live_table(md, srcu_idx); 633 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 634 msleep(10); 635 goto retry; 636 } 637 return r; 638 } 639 640 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 641 unsigned int cmd, unsigned long arg) 642 { 643 struct mapped_device *md = bdev->bd_disk->private_data; 644 int r; 645 646 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 647 if (r < 0) 648 return r; 649 650 if (r > 0) { 651 /* 652 * Target determined this ioctl is being issued against 653 * a logical partition of the parent bdev; so extra 654 * validation is needed. 
655 */ 656 r = scsi_verify_blk_ioctl(NULL, cmd); 657 if (r) 658 goto out; 659 } 660 661 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 662 out: 663 bdput(bdev); 664 return r; 665 } 666 667 static struct dm_io *alloc_io(struct mapped_device *md) 668 { 669 return mempool_alloc(md->io_pool, GFP_NOIO); 670 } 671 672 static void free_io(struct mapped_device *md, struct dm_io *io) 673 { 674 mempool_free(io, md->io_pool); 675 } 676 677 static void free_tio(struct dm_target_io *tio) 678 { 679 bio_put(&tio->clone); 680 } 681 682 static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md, 683 gfp_t gfp_mask) 684 { 685 return mempool_alloc(md->io_pool, gfp_mask); 686 } 687 688 static void free_old_rq_tio(struct dm_rq_target_io *tio) 689 { 690 mempool_free(tio, tio->md->io_pool); 691 } 692 693 static struct request *alloc_old_clone_request(struct mapped_device *md, 694 gfp_t gfp_mask) 695 { 696 return mempool_alloc(md->rq_pool, gfp_mask); 697 } 698 699 static void free_old_clone_request(struct mapped_device *md, struct request *rq) 700 { 701 mempool_free(rq, md->rq_pool); 702 } 703 704 static int md_in_flight(struct mapped_device *md) 705 { 706 return atomic_read(&md->pending[READ]) + 707 atomic_read(&md->pending[WRITE]); 708 } 709 710 static void start_io_acct(struct dm_io *io) 711 { 712 struct mapped_device *md = io->md; 713 struct bio *bio = io->bio; 714 int cpu; 715 int rw = bio_data_dir(bio); 716 717 io->start_time = jiffies; 718 719 cpu = part_stat_lock(); 720 part_round_stats(cpu, &dm_disk(md)->part0); 721 part_stat_unlock(); 722 atomic_set(&dm_disk(md)->part0.in_flight[rw], 723 atomic_inc_return(&md->pending[rw])); 724 725 if (unlikely(dm_stats_used(&md->stats))) 726 dm_stats_account_io(&md->stats, bio_data_dir(bio), 727 bio->bi_iter.bi_sector, bio_sectors(bio), 728 false, 0, &io->stats_aux); 729 } 730 731 static void end_io_acct(struct dm_io *io) 732 { 733 struct mapped_device *md = io->md; 734 struct bio *bio = io->bio; 735 unsigned long duration = jiffies - io->start_time; 736 int pending; 737 int rw = bio_data_dir(bio); 738 739 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); 740 741 if (unlikely(dm_stats_used(&md->stats))) 742 dm_stats_account_io(&md->stats, bio_data_dir(bio), 743 bio->bi_iter.bi_sector, bio_sectors(bio), 744 true, duration, &io->stats_aux); 745 746 /* 747 * After this is decremented the bio must not be touched if it is 748 * a flush. 749 */ 750 pending = atomic_dec_return(&md->pending[rw]); 751 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); 752 pending += atomic_read(&md->pending[rw^0x1]); 753 754 /* nudge anyone waiting on suspend queue */ 755 if (!pending) 756 wake_up(&md->wait); 757 } 758 759 /* 760 * Add the bio to the list of deferred io. 761 */ 762 static void queue_io(struct mapped_device *md, struct bio *bio) 763 { 764 unsigned long flags; 765 766 spin_lock_irqsave(&md->deferred_lock, flags); 767 bio_list_add(&md->deferred, bio); 768 spin_unlock_irqrestore(&md->deferred_lock, flags); 769 queue_work(md->wq, &md->work); 770 } 771 772 /* 773 * Everyone (including functions in this file), should use this 774 * function to access the md->map field, and make sure they call 775 * dm_put_live_table() when finished. 
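 *
 * Typical usage (an illustrative sketch, not part of the original file):
 *
 *	int srcu_idx;
 *	struct dm_table *map;
 *
 *	map = dm_get_live_table(md, &srcu_idx);
 *	if (map) {
 *		... the caller may block here; the table is not freed
 *		    until dm_put_live_table() is called ...
 *	}
 *	dm_put_live_table(md, srcu_idx);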
776 */ 777 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 778 { 779 *srcu_idx = srcu_read_lock(&md->io_barrier); 780 781 return srcu_dereference(md->map, &md->io_barrier); 782 } 783 784 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 785 { 786 srcu_read_unlock(&md->io_barrier, srcu_idx); 787 } 788 789 void dm_sync_table(struct mapped_device *md) 790 { 791 synchronize_srcu(&md->io_barrier); 792 synchronize_rcu_expedited(); 793 } 794 795 /* 796 * A fast alternative to dm_get_live_table/dm_put_live_table. 797 * The caller must not block between these two functions. 798 */ 799 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 800 { 801 rcu_read_lock(); 802 return rcu_dereference(md->map); 803 } 804 805 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 806 { 807 rcu_read_unlock(); 808 } 809 810 /* 811 * Open a table device so we can use it as a map destination. 812 */ 813 static int open_table_device(struct table_device *td, dev_t dev, 814 struct mapped_device *md) 815 { 816 static char *_claim_ptr = "I belong to device-mapper"; 817 struct block_device *bdev; 818 819 int r; 820 821 BUG_ON(td->dm_dev.bdev); 822 823 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); 824 if (IS_ERR(bdev)) 825 return PTR_ERR(bdev); 826 827 r = bd_link_disk_holder(bdev, dm_disk(md)); 828 if (r) { 829 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 830 return r; 831 } 832 833 td->dm_dev.bdev = bdev; 834 return 0; 835 } 836 837 /* 838 * Close a table device that we've been using. 839 */ 840 static void close_table_device(struct table_device *td, struct mapped_device *md) 841 { 842 if (!td->dm_dev.bdev) 843 return; 844 845 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 846 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 847 td->dm_dev.bdev = NULL; 848 } 849 850 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 851 fmode_t mode) { 852 struct table_device *td; 853 854 list_for_each_entry(td, l, list) 855 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 856 return td; 857 858 return NULL; 859 } 860 861 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 862 struct dm_dev **result) { 863 int r; 864 struct table_device *td; 865 866 mutex_lock(&md->table_devices_lock); 867 td = find_table_device(&md->table_devices, dev, mode); 868 if (!td) { 869 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 870 if (!td) { 871 mutex_unlock(&md->table_devices_lock); 872 return -ENOMEM; 873 } 874 875 td->dm_dev.mode = mode; 876 td->dm_dev.bdev = NULL; 877 878 if ((r = open_table_device(td, dev, md))) { 879 mutex_unlock(&md->table_devices_lock); 880 kfree(td); 881 return r; 882 } 883 884 format_dev_t(td->dm_dev.name, dev); 885 886 atomic_set(&td->count, 0); 887 list_add(&td->list, &md->table_devices); 888 } 889 atomic_inc(&td->count); 890 mutex_unlock(&md->table_devices_lock); 891 892 *result = &td->dm_dev; 893 return 0; 894 } 895 EXPORT_SYMBOL_GPL(dm_get_table_device); 896 897 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 898 { 899 struct table_device *td = container_of(d, struct table_device, dm_dev); 900 901 mutex_lock(&md->table_devices_lock); 902 if (atomic_dec_and_test(&td->count)) { 903 close_table_device(td, md); 904 list_del(&td->list); 905 kfree(td); 906 } 907 mutex_unlock(&md->table_devices_lock); 908 } 909 
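/*
 * Example pairing (an illustrative sketch, not part of the original file):
 * every reference taken with dm_get_table_device() must be dropped with
 * dm_put_table_device() on the same mapped_device:
 *
 *	struct dm_dev *d;
 *
 *	if (!dm_get_table_device(md, dev, mode, &d)) {
 *		... use d->bdev ...
 *		dm_put_table_device(md, d);
 *	}
 */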
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_rw &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio->bi_error = io_error;
			bio_endio(bio);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	int error = bio->bi_error;
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ?
blk_mq_rq_to_pdu(rq) : rq->special); 1119 } 1120 1121 static void rq_end_stats(struct mapped_device *md, struct request *orig) 1122 { 1123 if (unlikely(dm_stats_used(&md->stats))) { 1124 struct dm_rq_target_io *tio = tio_from_request(orig); 1125 tio->duration_jiffies = jiffies - tio->duration_jiffies; 1126 dm_stats_account_io(&md->stats, rq_data_dir(orig), 1127 blk_rq_pos(orig), tio->n_sectors, true, 1128 tio->duration_jiffies, &tio->stats_aux); 1129 } 1130 } 1131 1132 /* 1133 * Don't touch any member of the md after calling this function because 1134 * the md may be freed in dm_put() at the end of this function. 1135 * Or do dm_get() before calling this function and dm_put() later. 1136 */ 1137 static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 1138 { 1139 atomic_dec(&md->pending[rw]); 1140 1141 /* nudge anyone waiting on suspend queue */ 1142 if (!md_in_flight(md)) 1143 wake_up(&md->wait); 1144 1145 /* 1146 * Run this off this callpath, as drivers could invoke end_io while 1147 * inside their request_fn (and holding the queue lock). Calling 1148 * back into ->request_fn() could deadlock attempting to grab the 1149 * queue lock again. 1150 */ 1151 if (!md->queue->mq_ops && run_queue) 1152 blk_run_queue_async(md->queue); 1153 1154 /* 1155 * dm_put() must be at the end of this function. See the comment above 1156 */ 1157 dm_put(md); 1158 } 1159 1160 static void free_rq_clone(struct request *clone) 1161 { 1162 struct dm_rq_target_io *tio = clone->end_io_data; 1163 struct mapped_device *md = tio->md; 1164 1165 blk_rq_unprep_clone(clone); 1166 1167 if (md->type == DM_TYPE_MQ_REQUEST_BASED) 1168 /* stacked on blk-mq queue(s) */ 1169 tio->ti->type->release_clone_rq(clone); 1170 else if (!md->queue->mq_ops) 1171 /* request_fn queue stacked on request_fn queue(s) */ 1172 free_old_clone_request(md, clone); 1173 1174 if (!md->queue->mq_ops) 1175 free_old_rq_tio(tio); 1176 } 1177 1178 /* 1179 * Complete the clone and the original request. 1180 * Must be called without clone's queue lock held, 1181 * see end_clone_request() for more details. 1182 */ 1183 static void dm_end_request(struct request *clone, int error) 1184 { 1185 int rw = rq_data_dir(clone); 1186 struct dm_rq_target_io *tio = clone->end_io_data; 1187 struct mapped_device *md = tio->md; 1188 struct request *rq = tio->orig; 1189 1190 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 1191 rq->errors = clone->errors; 1192 rq->resid_len = clone->resid_len; 1193 1194 if (rq->sense) 1195 /* 1196 * We are using the sense buffer of the original 1197 * request. 1198 * So setting the length of the sense data is enough. 1199 */ 1200 rq->sense_len = clone->sense_len; 1201 } 1202 1203 free_rq_clone(clone); 1204 rq_end_stats(md, rq); 1205 if (!rq->q->mq_ops) 1206 blk_end_request_all(rq, error); 1207 else 1208 blk_mq_end_request(rq, error); 1209 rq_completed(md, rw, true); 1210 } 1211 1212 static void dm_unprep_request(struct request *rq) 1213 { 1214 struct dm_rq_target_io *tio = tio_from_request(rq); 1215 struct request *clone = tio->clone; 1216 1217 if (!rq->q->mq_ops) { 1218 rq->special = NULL; 1219 rq->cmd_flags &= ~REQ_DONTPREP; 1220 } 1221 1222 if (clone) 1223 free_rq_clone(clone); 1224 else if (!tio->md->queue->mq_ops) 1225 free_old_rq_tio(tio); 1226 } 1227 1228 /* 1229 * Requeue the original request of a clone. 
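 *
 * dm_requeue_original_request() below picks the variant that matches the
 * original request's queue: dm_old_requeue_request() for .request_fn
 * devices, dm_mq_requeue_request() for blk-mq devices.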
1230 */ 1231 static void dm_old_requeue_request(struct request *rq) 1232 { 1233 struct request_queue *q = rq->q; 1234 unsigned long flags; 1235 1236 spin_lock_irqsave(q->queue_lock, flags); 1237 blk_requeue_request(q, rq); 1238 blk_run_queue_async(q); 1239 spin_unlock_irqrestore(q->queue_lock, flags); 1240 } 1241 1242 static void dm_mq_requeue_request(struct request *rq) 1243 { 1244 struct request_queue *q = rq->q; 1245 unsigned long flags; 1246 1247 blk_mq_requeue_request(rq); 1248 spin_lock_irqsave(q->queue_lock, flags); 1249 if (!blk_queue_stopped(q)) 1250 blk_mq_kick_requeue_list(q); 1251 spin_unlock_irqrestore(q->queue_lock, flags); 1252 } 1253 1254 static void dm_requeue_original_request(struct mapped_device *md, 1255 struct request *rq) 1256 { 1257 int rw = rq_data_dir(rq); 1258 1259 rq_end_stats(md, rq); 1260 dm_unprep_request(rq); 1261 1262 if (!rq->q->mq_ops) 1263 dm_old_requeue_request(rq); 1264 else 1265 dm_mq_requeue_request(rq); 1266 1267 rq_completed(md, rw, false); 1268 } 1269 1270 static void dm_old_stop_queue(struct request_queue *q) 1271 { 1272 unsigned long flags; 1273 1274 spin_lock_irqsave(q->queue_lock, flags); 1275 if (blk_queue_stopped(q)) { 1276 spin_unlock_irqrestore(q->queue_lock, flags); 1277 return; 1278 } 1279 1280 blk_stop_queue(q); 1281 spin_unlock_irqrestore(q->queue_lock, flags); 1282 } 1283 1284 static void dm_stop_queue(struct request_queue *q) 1285 { 1286 if (!q->mq_ops) 1287 dm_old_stop_queue(q); 1288 else 1289 blk_mq_stop_hw_queues(q); 1290 } 1291 1292 static void dm_old_start_queue(struct request_queue *q) 1293 { 1294 unsigned long flags; 1295 1296 spin_lock_irqsave(q->queue_lock, flags); 1297 if (blk_queue_stopped(q)) 1298 blk_start_queue(q); 1299 spin_unlock_irqrestore(q->queue_lock, flags); 1300 } 1301 1302 static void dm_start_queue(struct request_queue *q) 1303 { 1304 if (!q->mq_ops) 1305 dm_old_start_queue(q); 1306 else { 1307 blk_mq_start_stopped_hw_queues(q, true); 1308 blk_mq_kick_requeue_list(q); 1309 } 1310 } 1311 1312 static void dm_done(struct request *clone, int error, bool mapped) 1313 { 1314 int r = error; 1315 struct dm_rq_target_io *tio = clone->end_io_data; 1316 dm_request_endio_fn rq_end_io = NULL; 1317 1318 if (tio->ti) { 1319 rq_end_io = tio->ti->type->rq_end_io; 1320 1321 if (mapped && rq_end_io) 1322 r = rq_end_io(tio->ti, clone, error, &tio->info); 1323 } 1324 1325 if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) && 1326 !clone->q->limits.max_write_same_sectors)) 1327 disable_write_same(tio->md); 1328 1329 if (r <= 0) 1330 /* The target wants to complete the I/O */ 1331 dm_end_request(clone, r); 1332 else if (r == DM_ENDIO_INCOMPLETE) 1333 /* The target will handle the I/O */ 1334 return; 1335 else if (r == DM_ENDIO_REQUEUE) 1336 /* The target wants to requeue the I/O */ 1337 dm_requeue_original_request(tio->md, tio->orig); 1338 else { 1339 DMWARN("unimplemented target endio return value: %d", r); 1340 BUG(); 1341 } 1342 } 1343 1344 /* 1345 * Request completion handler for request-based dm 1346 */ 1347 static void dm_softirq_done(struct request *rq) 1348 { 1349 bool mapped = true; 1350 struct dm_rq_target_io *tio = tio_from_request(rq); 1351 struct request *clone = tio->clone; 1352 int rw; 1353 1354 if (!clone) { 1355 rq_end_stats(tio->md, rq); 1356 rw = rq_data_dir(rq); 1357 if (!rq->q->mq_ops) { 1358 blk_end_request_all(rq, tio->error); 1359 rq_completed(tio->md, rw, false); 1360 free_old_rq_tio(tio); 1361 } else { 1362 blk_mq_end_request(rq, tio->error); 1363 rq_completed(tio->md, rw, false); 1364 } 
1365 return; 1366 } 1367 1368 if (rq->cmd_flags & REQ_FAILED) 1369 mapped = false; 1370 1371 dm_done(clone, tio->error, mapped); 1372 } 1373 1374 /* 1375 * Complete the clone and the original request with the error status 1376 * through softirq context. 1377 */ 1378 static void dm_complete_request(struct request *rq, int error) 1379 { 1380 struct dm_rq_target_io *tio = tio_from_request(rq); 1381 1382 tio->error = error; 1383 if (!rq->q->mq_ops) 1384 blk_complete_request(rq); 1385 else 1386 blk_mq_complete_request(rq, error); 1387 } 1388 1389 /* 1390 * Complete the not-mapped clone and the original request with the error status 1391 * through softirq context. 1392 * Target's rq_end_io() function isn't called. 1393 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. 1394 */ 1395 static void dm_kill_unmapped_request(struct request *rq, int error) 1396 { 1397 rq->cmd_flags |= REQ_FAILED; 1398 dm_complete_request(rq, error); 1399 } 1400 1401 /* 1402 * Called with the clone's queue lock held (in the case of .request_fn) 1403 */ 1404 static void end_clone_request(struct request *clone, int error) 1405 { 1406 struct dm_rq_target_io *tio = clone->end_io_data; 1407 1408 if (!clone->q->mq_ops) { 1409 /* 1410 * For just cleaning up the information of the queue in which 1411 * the clone was dispatched. 1412 * The clone is *NOT* freed actually here because it is alloced 1413 * from dm own mempool (REQ_ALLOCED isn't set). 1414 */ 1415 __blk_put_request(clone->q, clone); 1416 } 1417 1418 /* 1419 * Actual request completion is done in a softirq context which doesn't 1420 * hold the clone's queue lock. Otherwise, deadlock could occur because: 1421 * - another request may be submitted by the upper level driver 1422 * of the stacking during the completion 1423 * - the submission which requires queue lock may be done 1424 * against this clone's queue 1425 */ 1426 dm_complete_request(tio->orig, error); 1427 } 1428 1429 /* 1430 * Return maximum size of I/O possible at the supplied sector up to the current 1431 * target boundary. 1432 */ 1433 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 1434 { 1435 sector_t target_offset = dm_target_offset(ti, sector); 1436 1437 return ti->len - target_offset; 1438 } 1439 1440 static sector_t max_io_len(sector_t sector, struct dm_target *ti) 1441 { 1442 sector_t len = max_io_len_target_boundary(sector, ti); 1443 sector_t offset, max_len; 1444 1445 /* 1446 * Does the target need to split even further? 1447 */ 1448 if (ti->max_io_len) { 1449 offset = dm_target_offset(ti, sector); 1450 if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1451 max_len = sector_div(offset, ti->max_io_len); 1452 else 1453 max_len = offset & (ti->max_io_len - 1); 1454 max_len = ti->max_io_len - max_len; 1455 1456 if (len > max_len) 1457 len = max_len; 1458 } 1459 1460 return len; 1461 } 1462 1463 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1464 { 1465 if (len > UINT_MAX) { 1466 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1467 (unsigned long long)len, UINT_MAX); 1468 ti->error = "Maximum size of target IO is too large"; 1469 return -EINVAL; 1470 } 1471 1472 ti->max_io_len = (uint32_t) len; 1473 1474 return 0; 1475 } 1476 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1477 1478 /* 1479 * A target may call dm_accept_partial_bio only from the map routine. It is 1480 * allowed for all bio types except REQ_PREFLUSH. 
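 *
 * For example, a target's map function might accept only part of the data
 * (hypothetical target, sketch only; the arithmetic is explained below):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned n_sectors = ...;	(how much fits right now)
 *
 *		if (n_sectors < bio_sectors(bio))
 *			dm_accept_partial_bio(bio, n_sectors);
 *		bio->bi_bdev = ...;		(remap as usual)
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * dm core then sends the rest of the data in a follow-up bio.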
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_iter.bi_sector;
	r = ti->type->map(ti, clone);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		dec_pending(tio->io, r);
		free_tio(tio);
	} else if (r != DM_MAPIO_SUBMITTED) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of a range of complete bvecs.
1569 */ 1570 static int clone_bio(struct dm_target_io *tio, struct bio *bio, 1571 sector_t sector, unsigned len) 1572 { 1573 struct bio *clone = &tio->clone; 1574 1575 __bio_clone_fast(clone, bio); 1576 1577 if (bio_integrity(bio)) { 1578 int r = bio_integrity_clone(clone, bio, GFP_NOIO); 1579 if (r < 0) 1580 return r; 1581 } 1582 1583 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1584 clone->bi_iter.bi_size = to_bytes(len); 1585 1586 if (bio_integrity(bio)) 1587 bio_integrity_trim(clone, 0, len); 1588 1589 return 0; 1590 } 1591 1592 static struct dm_target_io *alloc_tio(struct clone_info *ci, 1593 struct dm_target *ti, 1594 unsigned target_bio_nr) 1595 { 1596 struct dm_target_io *tio; 1597 struct bio *clone; 1598 1599 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1600 tio = container_of(clone, struct dm_target_io, clone); 1601 1602 tio->io = ci->io; 1603 tio->ti = ti; 1604 tio->target_bio_nr = target_bio_nr; 1605 1606 return tio; 1607 } 1608 1609 static void __clone_and_map_simple_bio(struct clone_info *ci, 1610 struct dm_target *ti, 1611 unsigned target_bio_nr, unsigned *len) 1612 { 1613 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1614 struct bio *clone = &tio->clone; 1615 1616 tio->len_ptr = len; 1617 1618 __bio_clone_fast(clone, ci->bio); 1619 if (len) 1620 bio_setup_sector(clone, ci->sector, *len); 1621 1622 __map_bio(tio); 1623 } 1624 1625 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1626 unsigned num_bios, unsigned *len) 1627 { 1628 unsigned target_bio_nr; 1629 1630 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 1631 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 1632 } 1633 1634 static int __send_empty_flush(struct clone_info *ci) 1635 { 1636 unsigned target_nr = 0; 1637 struct dm_target *ti; 1638 1639 BUG_ON(bio_has_data(ci->bio)); 1640 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1641 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1642 1643 return 0; 1644 } 1645 1646 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1647 sector_t sector, unsigned *len) 1648 { 1649 struct bio *bio = ci->bio; 1650 struct dm_target_io *tio; 1651 unsigned target_bio_nr; 1652 unsigned num_target_bios = 1; 1653 int r = 0; 1654 1655 /* 1656 * Does the target want to receive duplicate copies of the bio? 
1657 */ 1658 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1659 num_target_bios = ti->num_write_bios(ti, bio); 1660 1661 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1662 tio = alloc_tio(ci, ti, target_bio_nr); 1663 tio->len_ptr = len; 1664 r = clone_bio(tio, bio, sector, *len); 1665 if (r < 0) { 1666 free_tio(tio); 1667 break; 1668 } 1669 __map_bio(tio); 1670 } 1671 1672 return r; 1673 } 1674 1675 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1676 1677 static unsigned get_num_discard_bios(struct dm_target *ti) 1678 { 1679 return ti->num_discard_bios; 1680 } 1681 1682 static unsigned get_num_write_same_bios(struct dm_target *ti) 1683 { 1684 return ti->num_write_same_bios; 1685 } 1686 1687 typedef bool (*is_split_required_fn)(struct dm_target *ti); 1688 1689 static bool is_split_required_for_discard(struct dm_target *ti) 1690 { 1691 return ti->split_discard_bios; 1692 } 1693 1694 static int __send_changing_extent_only(struct clone_info *ci, 1695 get_num_bios_fn get_num_bios, 1696 is_split_required_fn is_split_required) 1697 { 1698 struct dm_target *ti; 1699 unsigned len; 1700 unsigned num_bios; 1701 1702 do { 1703 ti = dm_table_find_target(ci->map, ci->sector); 1704 if (!dm_target_is_valid(ti)) 1705 return -EIO; 1706 1707 /* 1708 * Even though the device advertised support for this type of 1709 * request, that does not mean every target supports it, and 1710 * reconfiguration might also have changed that since the 1711 * check was performed. 1712 */ 1713 num_bios = get_num_bios ? get_num_bios(ti) : 0; 1714 if (!num_bios) 1715 return -EOPNOTSUPP; 1716 1717 if (is_split_required && !is_split_required(ti)) 1718 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1719 else 1720 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 1721 1722 __send_duplicate_bios(ci, ti, num_bios, &len); 1723 1724 ci->sector += len; 1725 } while (ci->sector_count -= len); 1726 1727 return 0; 1728 } 1729 1730 static int __send_discard(struct clone_info *ci) 1731 { 1732 return __send_changing_extent_only(ci, get_num_discard_bios, 1733 is_split_required_for_discard); 1734 } 1735 1736 static int __send_write_same(struct clone_info *ci) 1737 { 1738 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 1739 } 1740 1741 /* 1742 * Select the correct strategy for processing a non-flush bio. 1743 */ 1744 static int __split_and_process_non_flush(struct clone_info *ci) 1745 { 1746 struct bio *bio = ci->bio; 1747 struct dm_target *ti; 1748 unsigned len; 1749 int r; 1750 1751 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) 1752 return __send_discard(ci); 1753 else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 1754 return __send_write_same(ci); 1755 1756 ti = dm_table_find_target(ci->map, ci->sector); 1757 if (!dm_target_is_valid(ti)) 1758 return -EIO; 1759 1760 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1761 1762 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1763 if (r < 0) 1764 return r; 1765 1766 ci->sector += len; 1767 ci->sector_count -= len; 1768 1769 return 0; 1770 } 1771 1772 /* 1773 * Entry point to split a bio into clones and submit them to the targets. 
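 *
 * Rough flow (informal summary, not part of the original file):
 *
 *	ci.io = alloc_io(md);			one dm_io per original bio
 *	if (bio->bi_rw & REQ_PREFLUSH)
 *		__send_empty_flush();		clone md->flush_bio to each target
 *	else
 *		while (ci.sector_count && !error)
 *			__split_and_process_non_flush();
 *	dec_pending(ci.io, error);		drop the extra reference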
1774 */ 1775 static void __split_and_process_bio(struct mapped_device *md, 1776 struct dm_table *map, struct bio *bio) 1777 { 1778 struct clone_info ci; 1779 int error = 0; 1780 1781 if (unlikely(!map)) { 1782 bio_io_error(bio); 1783 return; 1784 } 1785 1786 ci.map = map; 1787 ci.md = md; 1788 ci.io = alloc_io(md); 1789 ci.io->error = 0; 1790 atomic_set(&ci.io->io_count, 1); 1791 ci.io->bio = bio; 1792 ci.io->md = md; 1793 spin_lock_init(&ci.io->endio_lock); 1794 ci.sector = bio->bi_iter.bi_sector; 1795 1796 start_io_acct(ci.io); 1797 1798 if (bio->bi_rw & REQ_PREFLUSH) { 1799 ci.bio = &ci.md->flush_bio; 1800 ci.sector_count = 0; 1801 error = __send_empty_flush(&ci); 1802 /* dec_pending submits any data associated with flush */ 1803 } else { 1804 ci.bio = bio; 1805 ci.sector_count = bio_sectors(bio); 1806 while (ci.sector_count && !error) 1807 error = __split_and_process_non_flush(&ci); 1808 } 1809 1810 /* drop the extra reference count */ 1811 dec_pending(ci.io, error); 1812 } 1813 /*----------------------------------------------------------------- 1814 * CRUD END 1815 *---------------------------------------------------------------*/ 1816 1817 /* 1818 * The request function that just remaps the bio built up by 1819 * dm_merge_bvec. 1820 */ 1821 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1822 { 1823 int rw = bio_data_dir(bio); 1824 struct mapped_device *md = q->queuedata; 1825 int srcu_idx; 1826 struct dm_table *map; 1827 1828 map = dm_get_live_table(md, &srcu_idx); 1829 1830 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 1831 1832 /* if we're suspended, we have to queue this io for later */ 1833 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1834 dm_put_live_table(md, srcu_idx); 1835 1836 if (!(bio->bi_rw & REQ_RAHEAD)) 1837 queue_io(md, bio); 1838 else 1839 bio_io_error(bio); 1840 return BLK_QC_T_NONE; 1841 } 1842 1843 __split_and_process_bio(md, map, bio); 1844 dm_put_live_table(md, srcu_idx); 1845 return BLK_QC_T_NONE; 1846 } 1847 1848 int dm_request_based(struct mapped_device *md) 1849 { 1850 return blk_queue_stackable(md->queue); 1851 } 1852 1853 static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1854 { 1855 int r; 1856 1857 if (blk_queue_io_stat(clone->q)) 1858 clone->cmd_flags |= REQ_IO_STAT; 1859 1860 clone->start_time = jiffies; 1861 r = blk_insert_cloned_request(clone->q, clone); 1862 if (r) 1863 /* must complete clone in terms of original request */ 1864 dm_complete_request(rq, r); 1865 } 1866 1867 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1868 void *data) 1869 { 1870 struct dm_rq_target_io *tio = data; 1871 struct dm_rq_clone_bio_info *info = 1872 container_of(bio, struct dm_rq_clone_bio_info, clone); 1873 1874 info->orig = bio_orig; 1875 info->tio = tio; 1876 bio->bi_end_io = end_clone_bio; 1877 1878 return 0; 1879 } 1880 1881 static int setup_clone(struct request *clone, struct request *rq, 1882 struct dm_rq_target_io *tio, gfp_t gfp_mask) 1883 { 1884 int r; 1885 1886 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 1887 dm_rq_bio_constructor, tio); 1888 if (r) 1889 return r; 1890 1891 clone->cmd = rq->cmd; 1892 clone->cmd_len = rq->cmd_len; 1893 clone->sense = rq->sense; 1894 clone->end_io = end_clone_request; 1895 clone->end_io_data = tio; 1896 1897 tio->clone = clone; 1898 1899 return 0; 1900 } 1901 1902 static struct request *clone_old_rq(struct request *rq, struct mapped_device *md, 1903 struct dm_rq_target_io *tio, gfp_t gfp_mask) 1904 
{ 1905 /* 1906 * Create clone for use with .request_fn request_queue 1907 */ 1908 struct request *clone; 1909 1910 clone = alloc_old_clone_request(md, gfp_mask); 1911 if (!clone) 1912 return NULL; 1913 1914 blk_rq_init(NULL, clone); 1915 if (setup_clone(clone, rq, tio, gfp_mask)) { 1916 /* -ENOMEM */ 1917 free_old_clone_request(md, clone); 1918 return NULL; 1919 } 1920 1921 return clone; 1922 } 1923 1924 static void map_tio_request(struct kthread_work *work); 1925 1926 static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 1927 struct mapped_device *md) 1928 { 1929 tio->md = md; 1930 tio->ti = NULL; 1931 tio->clone = NULL; 1932 tio->orig = rq; 1933 tio->error = 0; 1934 /* 1935 * Avoid initializing info for blk-mq; it passes 1936 * target-specific data through info.ptr 1937 * (see: dm_mq_init_request) 1938 */ 1939 if (!md->init_tio_pdu) 1940 memset(&tio->info, 0, sizeof(tio->info)); 1941 if (md->kworker_task) 1942 init_kthread_work(&tio->work, map_tio_request); 1943 } 1944 1945 static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq, 1946 struct mapped_device *md, 1947 gfp_t gfp_mask) 1948 { 1949 struct dm_rq_target_io *tio; 1950 int srcu_idx; 1951 struct dm_table *table; 1952 1953 tio = alloc_old_rq_tio(md, gfp_mask); 1954 if (!tio) 1955 return NULL; 1956 1957 init_tio(tio, rq, md); 1958 1959 table = dm_get_live_table(md, &srcu_idx); 1960 /* 1961 * Must clone a request if this .request_fn DM device 1962 * is stacked on .request_fn device(s). 1963 */ 1964 if (!dm_table_mq_request_based(table)) { 1965 if (!clone_old_rq(rq, md, tio, gfp_mask)) { 1966 dm_put_live_table(md, srcu_idx); 1967 free_old_rq_tio(tio); 1968 return NULL; 1969 } 1970 } 1971 dm_put_live_table(md, srcu_idx); 1972 1973 return tio; 1974 } 1975 1976 /* 1977 * Called with the queue lock held. 
1978 */ 1979 static int dm_old_prep_fn(struct request_queue *q, struct request *rq) 1980 { 1981 struct mapped_device *md = q->queuedata; 1982 struct dm_rq_target_io *tio; 1983 1984 if (unlikely(rq->special)) { 1985 DMWARN("Already has something in rq->special."); 1986 return BLKPREP_KILL; 1987 } 1988 1989 tio = dm_old_prep_tio(rq, md, GFP_ATOMIC); 1990 if (!tio) 1991 return BLKPREP_DEFER; 1992 1993 rq->special = tio; 1994 rq->cmd_flags |= REQ_DONTPREP; 1995 1996 return BLKPREP_OK; 1997 } 1998 1999 /* 2000 * Returns: 2001 * 0 : the request has been processed 2002 * DM_MAPIO_REQUEUE : the original request needs to be requeued 2003 * < 0 : the request was completed due to failure 2004 */ 2005 static int map_request(struct dm_rq_target_io *tio, struct request *rq, 2006 struct mapped_device *md) 2007 { 2008 int r; 2009 struct dm_target *ti = tio->ti; 2010 struct request *clone = NULL; 2011 2012 if (tio->clone) { 2013 clone = tio->clone; 2014 r = ti->type->map_rq(ti, clone, &tio->info); 2015 } else { 2016 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); 2017 if (r < 0) { 2018 /* The target wants to complete the I/O */ 2019 dm_kill_unmapped_request(rq, r); 2020 return r; 2021 } 2022 if (r != DM_MAPIO_REMAPPED) 2023 return r; 2024 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { 2025 /* -ENOMEM */ 2026 ti->type->release_clone_rq(clone); 2027 return DM_MAPIO_REQUEUE; 2028 } 2029 } 2030 2031 switch (r) { 2032 case DM_MAPIO_SUBMITTED: 2033 /* The target has taken the I/O to submit by itself later */ 2034 break; 2035 case DM_MAPIO_REMAPPED: 2036 /* The target has remapped the I/O so dispatch it */ 2037 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 2038 blk_rq_pos(rq)); 2039 dm_dispatch_clone_request(clone, rq); 2040 break; 2041 case DM_MAPIO_REQUEUE: 2042 /* The target wants to requeue the I/O */ 2043 dm_requeue_original_request(md, tio->orig); 2044 break; 2045 default: 2046 if (r > 0) { 2047 DMWARN("unimplemented target map return value: %d", r); 2048 BUG(); 2049 } 2050 2051 /* The target wants to complete the I/O */ 2052 dm_kill_unmapped_request(rq, r); 2053 return r; 2054 } 2055 2056 return 0; 2057 } 2058 2059 static void map_tio_request(struct kthread_work *work) 2060 { 2061 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 2062 struct request *rq = tio->orig; 2063 struct mapped_device *md = tio->md; 2064 2065 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 2066 dm_requeue_original_request(md, rq); 2067 } 2068 2069 static void dm_start_request(struct mapped_device *md, struct request *orig) 2070 { 2071 if (!orig->q->mq_ops) 2072 blk_start_request(orig); 2073 else 2074 blk_mq_start_request(orig); 2075 atomic_inc(&md->pending[rq_data_dir(orig)]); 2076 2077 if (md->seq_rq_merge_deadline_usecs) { 2078 md->last_rq_pos = rq_end_sector(orig); 2079 md->last_rq_rw = rq_data_dir(orig); 2080 md->last_rq_start_time = ktime_get(); 2081 } 2082 2083 if (unlikely(dm_stats_used(&md->stats))) { 2084 struct dm_rq_target_io *tio = tio_from_request(orig); 2085 tio->duration_jiffies = jiffies; 2086 tio->n_sectors = blk_rq_sectors(orig); 2087 dm_stats_account_io(&md->stats, rq_data_dir(orig), 2088 blk_rq_pos(orig), tio->n_sectors, false, 0, 2089 &tio->stats_aux); 2090 } 2091 2092 /* 2093 * Hold the md reference here for the in-flight I/O. 2094 * We can't rely on the reference count by device opener, 2095 * because the device may be closed during the request completion 2096 * when all bios are completed. 2097 * See the comment in rq_completed() too. 
2098 */ 2099 dm_get(md); 2100 } 2101 2102 #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000 2103 2104 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) 2105 { 2106 return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs); 2107 } 2108 2109 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, 2110 const char *buf, size_t count) 2111 { 2112 unsigned deadline; 2113 2114 if (!dm_request_based(md) || md->use_blk_mq) 2115 return count; 2116 2117 if (kstrtouint(buf, 10, &deadline)) 2118 return -EINVAL; 2119 2120 if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS) 2121 deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS; 2122 2123 md->seq_rq_merge_deadline_usecs = deadline; 2124 2125 return count; 2126 } 2127 2128 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) 2129 { 2130 ktime_t kt_deadline; 2131 2132 if (!md->seq_rq_merge_deadline_usecs) 2133 return false; 2134 2135 kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC); 2136 kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline); 2137 2138 return !ktime_after(ktime_get(), kt_deadline); 2139 } 2140 2141 /* 2142 * q->request_fn for request-based dm. 2143 * Called with the queue lock held. 2144 */ 2145 static void dm_request_fn(struct request_queue *q) 2146 { 2147 struct mapped_device *md = q->queuedata; 2148 struct dm_target *ti = md->immutable_target; 2149 struct request *rq; 2150 struct dm_rq_target_io *tio; 2151 sector_t pos = 0; 2152 2153 if (unlikely(!ti)) { 2154 int srcu_idx; 2155 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2156 2157 ti = dm_table_find_target(map, pos); 2158 dm_put_live_table(md, srcu_idx); 2159 } 2160 2161 /* 2162 * For suspend, check blk_queue_stopped() and increment 2163 * ->pending within a single queue_lock not to increment the 2164 * number of in-flight I/Os after the queue is stopped in 2165 * dm_suspend(). 2166 */ 2167 while (!blk_queue_stopped(q)) { 2168 rq = blk_peek_request(q); 2169 if (!rq) 2170 return; 2171 2172 /* always use block 0 to find the target for flushes for now */ 2173 pos = 0; 2174 if (req_op(rq) != REQ_OP_FLUSH) 2175 pos = blk_rq_pos(rq); 2176 2177 if ((dm_request_peeked_before_merge_deadline(md) && 2178 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && 2179 md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) || 2180 (ti->type->busy && ti->type->busy(ti))) { 2181 blk_delay_queue(q, HZ / 100); 2182 return; 2183 } 2184 2185 dm_start_request(md, rq); 2186 2187 tio = tio_from_request(rq); 2188 /* Establish tio->ti before queuing work (map_tio_request) */ 2189 tio->ti = ti; 2190 queue_kthread_work(&md->kworker, &tio->work); 2191 BUG_ON(!irqs_disabled()); 2192 } 2193 } 2194 2195 static int dm_any_congested(void *congested_data, int bdi_bits) 2196 { 2197 int r = bdi_bits; 2198 struct mapped_device *md = congested_data; 2199 struct dm_table *map; 2200 2201 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2202 if (dm_request_based(md)) { 2203 /* 2204 * With request-based DM we only need to check the 2205 * top-level queue for congestion. 2206 */ 2207 r = md->queue->backing_dev_info.wb.state & bdi_bits; 2208 } else { 2209 map = dm_get_live_table_fast(md); 2210 if (map) 2211 r = dm_table_any_congested(map, bdi_bits); 2212 dm_put_live_table_fast(md); 2213 } 2214 } 2215 2216 return r; 2217 } 2218 2219 /*----------------------------------------------------------------- 2220 * An IDR is used to keep track of allocated minor numbers. 
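 *
 * Minor numbers are claimed with the usual idr_preload()/idr_alloc()
 * pattern under _minor_lock (see specific_minor() and next_free_minor()
 * below); MINOR_ALLOCED is stored as a placeholder until alloc_dev()
 * installs the real mapped_device with idr_replace().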
2221 *---------------------------------------------------------------*/ 2222 static void free_minor(int minor) 2223 { 2224 spin_lock(&_minor_lock); 2225 idr_remove(&_minor_idr, minor); 2226 spin_unlock(&_minor_lock); 2227 } 2228 2229 /* 2230 * See if the device with a specific minor # is free. 2231 */ 2232 static int specific_minor(int minor) 2233 { 2234 int r; 2235 2236 if (minor >= (1 << MINORBITS)) 2237 return -EINVAL; 2238 2239 idr_preload(GFP_KERNEL); 2240 spin_lock(&_minor_lock); 2241 2242 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 2243 2244 spin_unlock(&_minor_lock); 2245 idr_preload_end(); 2246 if (r < 0) 2247 return r == -ENOSPC ? -EBUSY : r; 2248 return 0; 2249 } 2250 2251 static int next_free_minor(int *minor) 2252 { 2253 int r; 2254 2255 idr_preload(GFP_KERNEL); 2256 spin_lock(&_minor_lock); 2257 2258 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 2259 2260 spin_unlock(&_minor_lock); 2261 idr_preload_end(); 2262 if (r < 0) 2263 return r; 2264 *minor = r; 2265 return 0; 2266 } 2267 2268 static const struct block_device_operations dm_blk_dops; 2269 2270 static void dm_wq_work(struct work_struct *work); 2271 2272 static void dm_init_md_queue(struct mapped_device *md) 2273 { 2274 /* 2275 * Request-based dm devices cannot be stacked on top of bio-based dm 2276 * devices. The type of this dm device may not have been decided yet. 2277 * The type is decided at the first table loading time. 2278 * To prevent problematic device stacking, clear the queue flag 2279 * for request stacking support until then. 2280 * 2281 * This queue is new, so no concurrency on the queue_flags. 2282 */ 2283 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 2284 2285 /* 2286 * Initialize data that will only be used by a non-blk-mq DM queue 2287 * - must do so here (in alloc_dev callchain) before queue is used 2288 */ 2289 md->queue->queuedata = md; 2290 md->queue->backing_dev_info.congested_data = md; 2291 } 2292 2293 static void dm_init_normal_md_queue(struct mapped_device *md) 2294 { 2295 md->use_blk_mq = false; 2296 dm_init_md_queue(md); 2297 2298 /* 2299 * Initialize aspects of queue that aren't relevant for blk-mq 2300 */ 2301 md->queue->backing_dev_info.congested_fn = dm_any_congested; 2302 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 2303 } 2304 2305 static void cleanup_mapped_device(struct mapped_device *md) 2306 { 2307 if (md->wq) 2308 destroy_workqueue(md->wq); 2309 if (md->kworker_task) 2310 kthread_stop(md->kworker_task); 2311 mempool_destroy(md->io_pool); 2312 mempool_destroy(md->rq_pool); 2313 if (md->bs) 2314 bioset_free(md->bs); 2315 2316 cleanup_srcu_struct(&md->io_barrier); 2317 2318 if (md->disk) { 2319 spin_lock(&_minor_lock); 2320 md->disk->private_data = NULL; 2321 spin_unlock(&_minor_lock); 2322 del_gendisk(md->disk); 2323 put_disk(md->disk); 2324 } 2325 2326 if (md->queue) 2327 blk_cleanup_queue(md->queue); 2328 2329 if (md->bdev) { 2330 bdput(md->bdev); 2331 md->bdev = NULL; 2332 } 2333 } 2334 2335 /* 2336 * Allocate and initialise a blank device with a given minor. 
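 * On failure the error labels at the end of alloc_dev() unwind only the
 * steps that have already succeeded, in reverse order:
 * cleanup_mapped_device(), free_minor(), module_put() and finally kfree()
 * of the partially initialised md.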
2337 */ 2338 static struct mapped_device *alloc_dev(int minor) 2339 { 2340 int r, numa_node_id = dm_get_numa_node(); 2341 struct mapped_device *md; 2342 void *old_md; 2343 2344 md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 2345 if (!md) { 2346 DMWARN("unable to allocate device, out of memory."); 2347 return NULL; 2348 } 2349 2350 if (!try_module_get(THIS_MODULE)) 2351 goto bad_module_get; 2352 2353 /* get a minor number for the dev */ 2354 if (minor == DM_ANY_MINOR) 2355 r = next_free_minor(&minor); 2356 else 2357 r = specific_minor(minor); 2358 if (r < 0) 2359 goto bad_minor; 2360 2361 r = init_srcu_struct(&md->io_barrier); 2362 if (r < 0) 2363 goto bad_io_barrier; 2364 2365 md->numa_node_id = numa_node_id; 2366 md->use_blk_mq = use_blk_mq; 2367 md->init_tio_pdu = false; 2368 md->type = DM_TYPE_NONE; 2369 mutex_init(&md->suspend_lock); 2370 mutex_init(&md->type_lock); 2371 mutex_init(&md->table_devices_lock); 2372 spin_lock_init(&md->deferred_lock); 2373 atomic_set(&md->holders, 1); 2374 atomic_set(&md->open_count, 0); 2375 atomic_set(&md->event_nr, 0); 2376 atomic_set(&md->uevent_seq, 0); 2377 INIT_LIST_HEAD(&md->uevent_list); 2378 INIT_LIST_HEAD(&md->table_devices); 2379 spin_lock_init(&md->uevent_lock); 2380 2381 md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 2382 if (!md->queue) 2383 goto bad; 2384 2385 dm_init_md_queue(md); 2386 2387 md->disk = alloc_disk_node(1, numa_node_id); 2388 if (!md->disk) 2389 goto bad; 2390 2391 atomic_set(&md->pending[0], 0); 2392 atomic_set(&md->pending[1], 0); 2393 init_waitqueue_head(&md->wait); 2394 INIT_WORK(&md->work, dm_wq_work); 2395 init_waitqueue_head(&md->eventq); 2396 init_completion(&md->kobj_holder.completion); 2397 md->kworker_task = NULL; 2398 2399 md->disk->major = _major; 2400 md->disk->first_minor = minor; 2401 md->disk->fops = &dm_blk_dops; 2402 md->disk->queue = md->queue; 2403 md->disk->private_data = md; 2404 sprintf(md->disk->disk_name, "dm-%d", minor); 2405 add_disk(md->disk); 2406 format_dev_t(md->name, MKDEV(_major, minor)); 2407 2408 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2409 if (!md->wq) 2410 goto bad; 2411 2412 md->bdev = bdget_disk(md->disk, 0); 2413 if (!md->bdev) 2414 goto bad; 2415 2416 bio_init(&md->flush_bio); 2417 md->flush_bio.bi_bdev = md->bdev; 2418 bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); 2419 2420 dm_stats_init(&md->stats); 2421 2422 /* Populate the mapping, nobody knows we exist yet */ 2423 spin_lock(&_minor_lock); 2424 old_md = idr_replace(&_minor_idr, md, minor); 2425 spin_unlock(&_minor_lock); 2426 2427 BUG_ON(old_md != MINOR_ALLOCED); 2428 2429 return md; 2430 2431 bad: 2432 cleanup_mapped_device(md); 2433 bad_io_barrier: 2434 free_minor(minor); 2435 bad_minor: 2436 module_put(THIS_MODULE); 2437 bad_module_get: 2438 kfree(md); 2439 return NULL; 2440 } 2441 2442 static void unlock_fs(struct mapped_device *md); 2443 2444 static void free_dev(struct mapped_device *md) 2445 { 2446 int minor = MINOR(disk_devt(md->disk)); 2447 2448 unlock_fs(md); 2449 2450 cleanup_mapped_device(md); 2451 if (md->tag_set) { 2452 blk_mq_free_tag_set(md->tag_set); 2453 kfree(md->tag_set); 2454 } 2455 2456 free_table_devices(&md->table_devices); 2457 dm_stats_cleanup(&md->stats); 2458 free_minor(minor); 2459 2460 module_put(THIS_MODULE); 2461 kfree(md); 2462 } 2463 2464 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2465 { 2466 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2467 2468 if (md->bs) { 2469 /* The md already has necessary 
mempools. */ 2470 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2471 /* 2472 * Reload bioset because front_pad may have changed 2473 * because a different table was loaded. 2474 */ 2475 bioset_free(md->bs); 2476 md->bs = p->bs; 2477 p->bs = NULL; 2478 } 2479 /* 2480 * There's no need to reload with request-based dm 2481 * because the size of front_pad doesn't change. 2482 * Note for future: If you are to reload bioset, 2483 * prep-ed requests in the queue may refer 2484 * to bio from the old bioset, so you must walk 2485 * through the queue to unprep. 2486 */ 2487 goto out; 2488 } 2489 2490 BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 2491 2492 md->io_pool = p->io_pool; 2493 p->io_pool = NULL; 2494 md->rq_pool = p->rq_pool; 2495 p->rq_pool = NULL; 2496 md->bs = p->bs; 2497 p->bs = NULL; 2498 2499 out: 2500 /* mempool bind completed, no longer need any mempools in the table */ 2501 dm_table_free_md_mempools(t); 2502 } 2503 2504 /* 2505 * Bind a table to the device. 2506 */ 2507 static void event_callback(void *context) 2508 { 2509 unsigned long flags; 2510 LIST_HEAD(uevents); 2511 struct mapped_device *md = (struct mapped_device *) context; 2512 2513 spin_lock_irqsave(&md->uevent_lock, flags); 2514 list_splice_init(&md->uevent_list, &uevents); 2515 spin_unlock_irqrestore(&md->uevent_lock, flags); 2516 2517 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2518 2519 atomic_inc(&md->event_nr); 2520 wake_up(&md->eventq); 2521 } 2522 2523 /* 2524 * Protected by md->suspend_lock obtained by dm_swap_table(). 2525 */ 2526 static void __set_size(struct mapped_device *md, sector_t size) 2527 { 2528 set_capacity(md->disk, size); 2529 2530 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2531 } 2532 2533 /* 2534 * Returns old map, which caller must destroy. 2535 */ 2536 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2537 struct queue_limits *limits) 2538 { 2539 struct dm_table *old_map; 2540 struct request_queue *q = md->queue; 2541 sector_t size; 2542 2543 size = dm_table_get_size(t); 2544 2545 /* 2546 * Wipe any geometry if the size of the table changed. 2547 */ 2548 if (size != dm_get_size(md)) 2549 memset(&md->geometry, 0, sizeof(md->geometry)); 2550 2551 __set_size(md, size); 2552 2553 dm_table_event_callback(t, event_callback, md); 2554 2555 /* 2556 * The queue hasn't been stopped yet, if the old table type wasn't 2557 * for request-based during suspension. So stop it to prevent 2558 * I/O mapping before resume. 2559 * This must be done before setting the queue restrictions, 2560 * because request-based dm may be run just after the setting. 2561 */ 2562 if (dm_table_request_based(t)) { 2563 dm_stop_queue(q); 2564 /* 2565 * Leverage the fact that request-based DM targets are 2566 * immutable singletons and establish md->immutable_target 2567 * - used to optimize both dm_request_fn and dm_mq_queue_rq 2568 */ 2569 md->immutable_target = dm_table_get_immutable_target(t); 2570 } 2571 2572 __bind_mempools(md, t); 2573 2574 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2575 rcu_assign_pointer(md->map, (void *)t); 2576 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2577 2578 dm_table_set_restrictions(t, q, limits); 2579 if (old_map) 2580 dm_sync_table(md); 2581 2582 return old_map; 2583 } 2584 2585 /* 2586 * Returns unbound table for the caller to free. 
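 * __unbind() detaches the event callback, clears md->map under RCU and
 * calls dm_sync_table(), so readers that looked the table up through
 * dm_get_live_table{_fast}() are guaranteed to have finished before the
 * caller destroys it.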
2587 */ 2588 static struct dm_table *__unbind(struct mapped_device *md) 2589 { 2590 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2591 2592 if (!map) 2593 return NULL; 2594 2595 dm_table_event_callback(map, NULL, NULL); 2596 RCU_INIT_POINTER(md->map, NULL); 2597 dm_sync_table(md); 2598 2599 return map; 2600 } 2601 2602 /* 2603 * Constructor for a new device. 2604 */ 2605 int dm_create(int minor, struct mapped_device **result) 2606 { 2607 struct mapped_device *md; 2608 2609 md = alloc_dev(minor); 2610 if (!md) 2611 return -ENXIO; 2612 2613 dm_sysfs_init(md); 2614 2615 *result = md; 2616 return 0; 2617 } 2618 2619 /* 2620 * Functions to manage md->type. 2621 * All are required to hold md->type_lock. 2622 */ 2623 void dm_lock_md_type(struct mapped_device *md) 2624 { 2625 mutex_lock(&md->type_lock); 2626 } 2627 2628 void dm_unlock_md_type(struct mapped_device *md) 2629 { 2630 mutex_unlock(&md->type_lock); 2631 } 2632 2633 void dm_set_md_type(struct mapped_device *md, unsigned type) 2634 { 2635 BUG_ON(!mutex_is_locked(&md->type_lock)); 2636 md->type = type; 2637 } 2638 2639 unsigned dm_get_md_type(struct mapped_device *md) 2640 { 2641 return md->type; 2642 } 2643 2644 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2645 { 2646 return md->immutable_target_type; 2647 } 2648 2649 /* 2650 * The queue_limits are only valid as long as you have a reference 2651 * count on 'md'. 2652 */ 2653 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2654 { 2655 BUG_ON(!atomic_read(&md->holders)); 2656 return &md->queue->limits; 2657 } 2658 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2659 2660 static void dm_old_init_rq_based_worker_thread(struct mapped_device *md) 2661 { 2662 /* Initialize the request-based DM worker thread */ 2663 init_kthread_worker(&md->kworker); 2664 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2665 "kdmwork-%s", dm_device_name(md)); 2666 } 2667 2668 /* 2669 * Fully initialize a .request_fn request-based queue. 2670 */ 2671 static int dm_old_init_request_queue(struct mapped_device *md) 2672 { 2673 /* Fully initialize the queue */ 2674 if (!blk_init_allocated_queue(md->queue, dm_request_fn, NULL)) 2675 return -EINVAL; 2676 2677 /* disable dm_request_fn's merge heuristic by default */ 2678 md->seq_rq_merge_deadline_usecs = 0; 2679 2680 dm_init_normal_md_queue(md); 2681 blk_queue_softirq_done(md->queue, dm_softirq_done); 2682 blk_queue_prep_rq(md->queue, dm_old_prep_fn); 2683 2684 dm_old_init_rq_based_worker_thread(md); 2685 2686 elv_register_queue(md->queue); 2687 2688 return 0; 2689 } 2690 2691 static int dm_mq_init_request(void *data, struct request *rq, 2692 unsigned int hctx_idx, unsigned int request_idx, 2693 unsigned int numa_node) 2694 { 2695 struct mapped_device *md = data; 2696 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2697 2698 /* 2699 * Must initialize md member of tio, otherwise it won't 2700 * be available in dm_mq_queue_rq. 
2701 */ 2702 tio->md = md; 2703 2704 if (md->init_tio_pdu) { 2705 /* target-specific per-io data is immediately after the tio */ 2706 tio->info.ptr = tio + 1; 2707 } 2708 2709 return 0; 2710 } 2711 2712 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 2713 const struct blk_mq_queue_data *bd) 2714 { 2715 struct request *rq = bd->rq; 2716 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2717 struct mapped_device *md = tio->md; 2718 struct dm_target *ti = md->immutable_target; 2719 2720 if (unlikely(!ti)) { 2721 int srcu_idx; 2722 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2723 2724 ti = dm_table_find_target(map, 0); 2725 dm_put_live_table(md, srcu_idx); 2726 } 2727 2728 if (ti->type->busy && ti->type->busy(ti)) 2729 return BLK_MQ_RQ_QUEUE_BUSY; 2730 2731 dm_start_request(md, rq); 2732 2733 /* Init tio using md established in .init_request */ 2734 init_tio(tio, rq, md); 2735 2736 /* 2737 * Establish tio->ti before queuing work (map_tio_request) 2738 * or making direct call to map_request(). 2739 */ 2740 tio->ti = ti; 2741 2742 /* Direct call is fine since .queue_rq allows allocations */ 2743 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { 2744 /* Undo dm_start_request() before requeuing */ 2745 rq_end_stats(md, rq); 2746 rq_completed(md, rq_data_dir(rq), false); 2747 return BLK_MQ_RQ_QUEUE_BUSY; 2748 } 2749 2750 return BLK_MQ_RQ_QUEUE_OK; 2751 } 2752 2753 static struct blk_mq_ops dm_mq_ops = { 2754 .queue_rq = dm_mq_queue_rq, 2755 .map_queue = blk_mq_map_queue, 2756 .complete = dm_softirq_done, 2757 .init_request = dm_mq_init_request, 2758 }; 2759 2760 static int dm_mq_init_request_queue(struct mapped_device *md, 2761 struct dm_target *immutable_tgt) 2762 { 2763 struct request_queue *q; 2764 int err; 2765 2766 if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { 2767 DMERR("request-based dm-mq may only be stacked on blk-mq device(s)"); 2768 return -EINVAL; 2769 } 2770 2771 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); 2772 if (!md->tag_set) 2773 return -ENOMEM; 2774 2775 md->tag_set->ops = &dm_mq_ops; 2776 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); 2777 md->tag_set->numa_node = md->numa_node_id; 2778 md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2779 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); 2780 md->tag_set->driver_data = md; 2781 2782 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); 2783 if (immutable_tgt && immutable_tgt->per_io_data_size) { 2784 /* any target-specific per-io data is immediately after the tio */ 2785 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; 2786 md->init_tio_pdu = true; 2787 } 2788 2789 err = blk_mq_alloc_tag_set(md->tag_set); 2790 if (err) 2791 goto out_kfree_tag_set; 2792 2793 q = blk_mq_init_allocated_queue(md->tag_set, md->queue); 2794 if (IS_ERR(q)) { 2795 err = PTR_ERR(q); 2796 goto out_tag_set; 2797 } 2798 dm_init_md_queue(md); 2799 2800 /* backfill 'mq' sysfs registration normally done in blk_register_queue */ 2801 blk_mq_register_disk(md->disk); 2802 2803 return 0; 2804 2805 out_tag_set: 2806 blk_mq_free_tag_set(md->tag_set); 2807 out_kfree_tag_set: 2808 kfree(md->tag_set); 2809 2810 return err; 2811 } 2812 2813 static unsigned filter_md_type(unsigned type, struct mapped_device *md) 2814 { 2815 if (type == DM_TYPE_BIO_BASED) 2816 return type; 2817 2818 return !md->use_blk_mq ? 
DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED; 2819 } 2820 2821 /* 2822 * Setup the DM device's queue based on md's type 2823 */ 2824 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2825 { 2826 int r; 2827 unsigned md_type = filter_md_type(dm_get_md_type(md), md); 2828 2829 switch (md_type) { 2830 case DM_TYPE_REQUEST_BASED: 2831 r = dm_old_init_request_queue(md); 2832 if (r) { 2833 DMERR("Cannot initialize queue for request-based mapped device"); 2834 return r; 2835 } 2836 break; 2837 case DM_TYPE_MQ_REQUEST_BASED: 2838 r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t)); 2839 if (r) { 2840 DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2841 return r; 2842 } 2843 break; 2844 case DM_TYPE_BIO_BASED: 2845 dm_init_normal_md_queue(md); 2846 blk_queue_make_request(md->queue, dm_make_request); 2847 /* 2848 * DM handles splitting bios as needed. Free the bio_split bioset 2849 * since it won't be used (saves 1 process per bio-based DM device). 2850 */ 2851 bioset_free(md->queue->bio_split); 2852 md->queue->bio_split = NULL; 2853 break; 2854 } 2855 2856 return 0; 2857 } 2858 2859 struct mapped_device *dm_get_md(dev_t dev) 2860 { 2861 struct mapped_device *md; 2862 unsigned minor = MINOR(dev); 2863 2864 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2865 return NULL; 2866 2867 spin_lock(&_minor_lock); 2868 2869 md = idr_find(&_minor_idr, minor); 2870 if (md) { 2871 if ((md == MINOR_ALLOCED || 2872 (MINOR(disk_devt(dm_disk(md))) != minor) || 2873 dm_deleting_md(md) || 2874 test_bit(DMF_FREEING, &md->flags))) { 2875 md = NULL; 2876 goto out; 2877 } 2878 dm_get(md); 2879 } 2880 2881 out: 2882 spin_unlock(&_minor_lock); 2883 2884 return md; 2885 } 2886 EXPORT_SYMBOL_GPL(dm_get_md); 2887 2888 void *dm_get_mdptr(struct mapped_device *md) 2889 { 2890 return md->interface_ptr; 2891 } 2892 2893 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2894 { 2895 md->interface_ptr = ptr; 2896 } 2897 2898 void dm_get(struct mapped_device *md) 2899 { 2900 atomic_inc(&md->holders); 2901 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2902 } 2903 2904 int dm_hold(struct mapped_device *md) 2905 { 2906 spin_lock(&_minor_lock); 2907 if (test_bit(DMF_FREEING, &md->flags)) { 2908 spin_unlock(&_minor_lock); 2909 return -EBUSY; 2910 } 2911 dm_get(md); 2912 spin_unlock(&_minor_lock); 2913 return 0; 2914 } 2915 EXPORT_SYMBOL_GPL(dm_hold); 2916 2917 const char *dm_device_name(struct mapped_device *md) 2918 { 2919 return md->name; 2920 } 2921 EXPORT_SYMBOL_GPL(dm_device_name); 2922 2923 static void __dm_destroy(struct mapped_device *md, bool wait) 2924 { 2925 struct dm_table *map; 2926 int srcu_idx; 2927 2928 might_sleep(); 2929 2930 spin_lock(&_minor_lock); 2931 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2932 set_bit(DMF_FREEING, &md->flags); 2933 spin_unlock(&_minor_lock); 2934 2935 if (dm_request_based(md) && md->kworker_task) 2936 flush_kthread_worker(&md->kworker); 2937 2938 /* 2939 * Take suspend_lock so that presuspend and postsuspend methods 2940 * do not race with internal suspend. 
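 * DMF_FREEING was set above under _minor_lock, so dm_get()/dm_hold() can no
 * longer hand out new references; the wait on md->holders further down only
 * has to let the existing ones drain.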
2941 */ 2942 mutex_lock(&md->suspend_lock); 2943 map = dm_get_live_table(md, &srcu_idx); 2944 if (!dm_suspended_md(md)) { 2945 dm_table_presuspend_targets(map); 2946 dm_table_postsuspend_targets(map); 2947 } 2948 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2949 dm_put_live_table(md, srcu_idx); 2950 mutex_unlock(&md->suspend_lock); 2951 2952 /* 2953 * Rare, but there may be I/O requests still going to complete, 2954 * for example. Wait for all references to disappear. 2955 * No one should increment the reference count of the mapped_device, 2956 * after the mapped_device state becomes DMF_FREEING. 2957 */ 2958 if (wait) 2959 while (atomic_read(&md->holders)) 2960 msleep(1); 2961 else if (atomic_read(&md->holders)) 2962 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2963 dm_device_name(md), atomic_read(&md->holders)); 2964 2965 dm_sysfs_exit(md); 2966 dm_table_destroy(__unbind(md)); 2967 free_dev(md); 2968 } 2969 2970 void dm_destroy(struct mapped_device *md) 2971 { 2972 __dm_destroy(md, true); 2973 } 2974 2975 void dm_destroy_immediate(struct mapped_device *md) 2976 { 2977 __dm_destroy(md, false); 2978 } 2979 2980 void dm_put(struct mapped_device *md) 2981 { 2982 atomic_dec(&md->holders); 2983 } 2984 EXPORT_SYMBOL_GPL(dm_put); 2985 2986 static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 2987 { 2988 int r = 0; 2989 DECLARE_WAITQUEUE(wait, current); 2990 2991 add_wait_queue(&md->wait, &wait); 2992 2993 while (1) { 2994 set_current_state(interruptible); 2995 2996 if (!md_in_flight(md)) 2997 break; 2998 2999 if (interruptible == TASK_INTERRUPTIBLE && 3000 signal_pending(current)) { 3001 r = -EINTR; 3002 break; 3003 } 3004 3005 io_schedule(); 3006 } 3007 set_current_state(TASK_RUNNING); 3008 3009 remove_wait_queue(&md->wait, &wait); 3010 3011 return r; 3012 } 3013 3014 /* 3015 * Process the deferred bios 3016 */ 3017 static void dm_wq_work(struct work_struct *work) 3018 { 3019 struct mapped_device *md = container_of(work, struct mapped_device, 3020 work); 3021 struct bio *c; 3022 int srcu_idx; 3023 struct dm_table *map; 3024 3025 map = dm_get_live_table(md, &srcu_idx); 3026 3027 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 3028 spin_lock_irq(&md->deferred_lock); 3029 c = bio_list_pop(&md->deferred); 3030 spin_unlock_irq(&md->deferred_lock); 3031 3032 if (!c) 3033 break; 3034 3035 if (dm_request_based(md)) 3036 generic_make_request(c); 3037 else 3038 __split_and_process_bio(md, map, c); 3039 } 3040 3041 dm_put_live_table(md, srcu_idx); 3042 } 3043 3044 static void dm_queue_flush(struct mapped_device *md) 3045 { 3046 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3047 smp_mb__after_atomic(); 3048 queue_work(md->wq, &md->work); 3049 } 3050 3051 /* 3052 * Swap in a new table, returning the old one for the caller to destroy. 3053 */ 3054 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 3055 { 3056 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 3057 struct queue_limits limits; 3058 int r; 3059 3060 mutex_lock(&md->suspend_lock); 3061 3062 /* device must be suspended */ 3063 if (!dm_suspended_md(md)) 3064 goto out; 3065 3066 /* 3067 * If the new table has no data devices, retain the existing limits. 3068 * This helps multipath with queue_if_no_path if all paths disappear, 3069 * then new I/O is queued based on these limits, and then some paths 3070 * reappear. 
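 * If there is no live table to borrow limits from, the limits are still
 * calculated from the new table below.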
3071 */ 3072 if (dm_table_has_no_data_devices(table)) { 3073 live_map = dm_get_live_table_fast(md); 3074 if (live_map) 3075 limits = md->queue->limits; 3076 dm_put_live_table_fast(md); 3077 } 3078 3079 if (!live_map) { 3080 r = dm_calculate_queue_limits(table, &limits); 3081 if (r) { 3082 map = ERR_PTR(r); 3083 goto out; 3084 } 3085 } 3086 3087 map = __bind(md, table, &limits); 3088 3089 out: 3090 mutex_unlock(&md->suspend_lock); 3091 return map; 3092 } 3093 3094 /* 3095 * Functions to lock and unlock any filesystem running on the 3096 * device. 3097 */ 3098 static int lock_fs(struct mapped_device *md) 3099 { 3100 int r; 3101 3102 WARN_ON(md->frozen_sb); 3103 3104 md->frozen_sb = freeze_bdev(md->bdev); 3105 if (IS_ERR(md->frozen_sb)) { 3106 r = PTR_ERR(md->frozen_sb); 3107 md->frozen_sb = NULL; 3108 return r; 3109 } 3110 3111 set_bit(DMF_FROZEN, &md->flags); 3112 3113 return 0; 3114 } 3115 3116 static void unlock_fs(struct mapped_device *md) 3117 { 3118 if (!test_bit(DMF_FROZEN, &md->flags)) 3119 return; 3120 3121 thaw_bdev(md->bdev, md->frozen_sb); 3122 md->frozen_sb = NULL; 3123 clear_bit(DMF_FROZEN, &md->flags); 3124 } 3125 3126 /* 3127 * If __dm_suspend returns 0, the device is completely quiescent 3128 * now. There is no request-processing activity. All new requests 3129 * are being added to md->deferred list. 3130 * 3131 * Caller must hold md->suspend_lock 3132 */ 3133 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 3134 unsigned suspend_flags, int interruptible) 3135 { 3136 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 3137 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 3138 int r; 3139 3140 /* 3141 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 3142 * This flag is cleared before dm_suspend returns. 3143 */ 3144 if (noflush) 3145 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 3146 3147 /* 3148 * This gets reverted if there's an error later and the targets 3149 * provide the .presuspend_undo hook. 3150 */ 3151 dm_table_presuspend_targets(map); 3152 3153 /* 3154 * Flush I/O to the device. 3155 * Any I/O submitted after lock_fs() may not be flushed. 3156 * noflush takes precedence over do_lockfs. 3157 * (lock_fs() flushes I/Os and waits for them to complete.) 3158 */ 3159 if (!noflush && do_lockfs) { 3160 r = lock_fs(md); 3161 if (r) { 3162 dm_table_presuspend_undo_targets(map); 3163 return r; 3164 } 3165 } 3166 3167 /* 3168 * Here we must make sure that no processes are submitting requests 3169 * to target drivers i.e. no one may be executing 3170 * __split_and_process_bio. This is called from dm_make_request and 3171 * dm_wq_work. 3172 * 3173 * To get all processes out of __split_and_process_bio in dm_make_request, 3174 * we synchronize against md->io_barrier (SRCU). To prevent any process from reentering 3175 * __split_and_process_bio from dm_make_request and quiesce the thread 3176 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 3177 * flush_workqueue(md->wq). 3178 */ 3179 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3180 if (map) 3181 synchronize_srcu(&md->io_barrier); 3182 3183 /* 3184 * Stop md->queue before flushing md->wq in case request-based 3185 * dm defers requests to md->wq from md->queue. 3186 */ 3187 if (dm_request_based(md)) { 3188 dm_stop_queue(md->queue); 3189 if (md->kworker_task) 3190 flush_kthread_worker(&md->kworker); 3191 } 3192 3193 flush_workqueue(md->wq); 3194 3195 /* 3196 * At this point no more requests are entering target request routines. 3197 * We call dm_wait_for_completion to wait for all existing requests 3198 * to finish.
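 * With TASK_INTERRUPTIBLE a pending signal makes dm_wait_for_completion()
 * return -EINTR; the error path below then re-enables deferred I/O,
 * restarts a request-based queue, unlocks the filesystem and undoes the
 * presuspend work.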
3199 */ 3200 r = dm_wait_for_completion(md, interruptible); 3201 3202 if (noflush) 3203 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 3204 if (map) 3205 synchronize_srcu(&md->io_barrier); 3206 3207 /* were we interrupted ? */ 3208 if (r < 0) { 3209 dm_queue_flush(md); 3210 3211 if (dm_request_based(md)) 3212 dm_start_queue(md->queue); 3213 3214 unlock_fs(md); 3215 dm_table_presuspend_undo_targets(map); 3216 /* pushback list is already flushed, so skip flush */ 3217 } 3218 3219 return r; 3220 } 3221 3222 /* 3223 * We need to be able to change a mapping table under a mounted 3224 * filesystem. For example we might want to move some data in 3225 * the background. Before the table can be swapped with 3226 * dm_bind_table, dm_suspend must be called to flush any in 3227 * flight bios and ensure that any further io gets deferred. 3228 */ 3229 /* 3230 * Suspend mechanism in request-based dm. 3231 * 3232 * 1. Flush all I/Os by lock_fs() if needed. 3233 * 2. Stop dispatching any I/O by stopping the request_queue. 3234 * 3. Wait for all in-flight I/Os to be completed or requeued. 3235 * 3236 * To abort suspend, start the request_queue. 3237 */ 3238 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 3239 { 3240 struct dm_table *map = NULL; 3241 int r = 0; 3242 3243 retry: 3244 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3245 3246 if (dm_suspended_md(md)) { 3247 r = -EINVAL; 3248 goto out_unlock; 3249 } 3250 3251 if (dm_suspended_internally_md(md)) { 3252 /* already internally suspended, wait for internal resume */ 3253 mutex_unlock(&md->suspend_lock); 3254 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3255 if (r) 3256 return r; 3257 goto retry; 3258 } 3259 3260 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3261 3262 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); 3263 if (r) 3264 goto out_unlock; 3265 3266 set_bit(DMF_SUSPENDED, &md->flags); 3267 3268 dm_table_postsuspend_targets(map); 3269 3270 out_unlock: 3271 mutex_unlock(&md->suspend_lock); 3272 return r; 3273 } 3274 3275 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 3276 { 3277 if (map) { 3278 int r = dm_table_resume_targets(map); 3279 if (r) 3280 return r; 3281 } 3282 3283 dm_queue_flush(md); 3284 3285 /* 3286 * Flushing deferred I/Os must be done after targets are resumed 3287 * so that mapping of targets can work correctly. 3288 * Request-based dm is queueing the deferred I/Os in its request_queue. 
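 * Bio-based devices rely on dm_queue_flush() above to re-issue the bios
 * held on md->deferred; request-based devices only need their
 * request_queue restarted below.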
3289 */ 3290 if (dm_request_based(md)) 3291 dm_start_queue(md->queue); 3292 3293 unlock_fs(md); 3294 3295 return 0; 3296 } 3297 3298 int dm_resume(struct mapped_device *md) 3299 { 3300 int r = -EINVAL; 3301 struct dm_table *map = NULL; 3302 3303 retry: 3304 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3305 3306 if (!dm_suspended_md(md)) 3307 goto out; 3308 3309 if (dm_suspended_internally_md(md)) { 3310 /* already internally suspended, wait for internal resume */ 3311 mutex_unlock(&md->suspend_lock); 3312 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3313 if (r) 3314 return r; 3315 goto retry; 3316 } 3317 3318 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3319 if (!map || !dm_table_get_size(map)) 3320 goto out; 3321 3322 r = __dm_resume(md, map); 3323 if (r) 3324 goto out; 3325 3326 clear_bit(DMF_SUSPENDED, &md->flags); 3327 3328 r = 0; 3329 out: 3330 mutex_unlock(&md->suspend_lock); 3331 3332 return r; 3333 } 3334 3335 /* 3336 * Internal suspend/resume works like userspace-driven suspend. It waits 3337 * until all bios finish and prevents issuing new bios to the target drivers. 3338 * It may be used only from the kernel. 3339 */ 3340 3341 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 3342 { 3343 struct dm_table *map = NULL; 3344 3345 if (md->internal_suspend_count++) 3346 return; /* nested internal suspend */ 3347 3348 if (dm_suspended_md(md)) { 3349 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3350 return; /* nest suspend */ 3351 } 3352 3353 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3354 3355 /* 3356 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3357 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3358 * would require changing .presuspend to return an error -- avoid this 3359 * until there is a need for more elaborate variants of internal suspend. 3360 */ 3361 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); 3362 3363 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3364 3365 dm_table_postsuspend_targets(map); 3366 } 3367 3368 static void __dm_internal_resume(struct mapped_device *md) 3369 { 3370 BUG_ON(!md->internal_suspend_count); 3371 3372 if (--md->internal_suspend_count) 3373 return; /* resume from nested internal suspend */ 3374 3375 if (dm_suspended_md(md)) 3376 goto done; /* resume from nested suspend */ 3377 3378 /* 3379 * NOTE: existing callers don't need to call dm_table_resume_targets 3380 * (which may fail -- so best to avoid it for now by passing NULL map) 3381 */ 3382 (void) __dm_resume(md, NULL); 3383 3384 done: 3385 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3386 smp_mb__after_atomic(); 3387 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3388 } 3389 3390 void dm_internal_suspend_noflush(struct mapped_device *md) 3391 { 3392 mutex_lock(&md->suspend_lock); 3393 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3394 mutex_unlock(&md->suspend_lock); 3395 } 3396 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3397 3398 void dm_internal_resume(struct mapped_device *md) 3399 { 3400 mutex_lock(&md->suspend_lock); 3401 __dm_internal_resume(md); 3402 mutex_unlock(&md->suspend_lock); 3403 } 3404 EXPORT_SYMBOL_GPL(dm_internal_resume); 3405 3406 /* 3407 * Fast variants of internal suspend/resume hold md->suspend_lock, 3408 * which prevents interaction with userspace-driven suspend. 
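 * dm_internal_suspend_fast() takes the lock and never drops it;
 * dm_internal_resume_fast() is what releases it, so the two must always be
 * used as a pair (the early return in the suspend path deliberately leaves
 * the lock held).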
3409 */ 3410 3411 void dm_internal_suspend_fast(struct mapped_device *md) 3412 { 3413 mutex_lock(&md->suspend_lock); 3414 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3415 return; 3416 3417 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3418 synchronize_srcu(&md->io_barrier); 3419 flush_workqueue(md->wq); 3420 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3421 } 3422 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3423 3424 void dm_internal_resume_fast(struct mapped_device *md) 3425 { 3426 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3427 goto done; 3428 3429 dm_queue_flush(md); 3430 3431 done: 3432 mutex_unlock(&md->suspend_lock); 3433 } 3434 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3435 3436 /*----------------------------------------------------------------- 3437 * Event notification. 3438 *---------------------------------------------------------------*/ 3439 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 3440 unsigned cookie) 3441 { 3442 char udev_cookie[DM_COOKIE_LENGTH]; 3443 char *envp[] = { udev_cookie, NULL }; 3444 3445 if (!cookie) 3446 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 3447 else { 3448 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 3449 DM_COOKIE_ENV_VAR_NAME, cookie); 3450 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 3451 action, envp); 3452 } 3453 } 3454 3455 uint32_t dm_next_uevent_seq(struct mapped_device *md) 3456 { 3457 return atomic_add_return(1, &md->uevent_seq); 3458 } 3459 3460 uint32_t dm_get_event_nr(struct mapped_device *md) 3461 { 3462 return atomic_read(&md->event_nr); 3463 } 3464 3465 int dm_wait_event(struct mapped_device *md, int event_nr) 3466 { 3467 return wait_event_interruptible(md->eventq, 3468 (event_nr != atomic_read(&md->event_nr))); 3469 } 3470 3471 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 3472 { 3473 unsigned long flags; 3474 3475 spin_lock_irqsave(&md->uevent_lock, flags); 3476 list_add(elist, &md->uevent_list); 3477 spin_unlock_irqrestore(&md->uevent_lock, flags); 3478 } 3479 3480 /* 3481 * The gendisk is only valid as long as you have a reference 3482 * count on 'md'. 
3483 */ 3484 struct gendisk *dm_disk(struct mapped_device *md) 3485 { 3486 return md->disk; 3487 } 3488 EXPORT_SYMBOL_GPL(dm_disk); 3489 3490 struct kobject *dm_kobject(struct mapped_device *md) 3491 { 3492 return &md->kobj_holder.kobj; 3493 } 3494 3495 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3496 { 3497 struct mapped_device *md; 3498 3499 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3500 3501 if (test_bit(DMF_FREEING, &md->flags) || 3502 dm_deleting_md(md)) 3503 return NULL; 3504 3505 dm_get(md); 3506 return md; 3507 } 3508 3509 int dm_suspended_md(struct mapped_device *md) 3510 { 3511 return test_bit(DMF_SUSPENDED, &md->flags); 3512 } 3513 3514 int dm_suspended_internally_md(struct mapped_device *md) 3515 { 3516 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3517 } 3518 3519 int dm_test_deferred_remove_flag(struct mapped_device *md) 3520 { 3521 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 3522 } 3523 3524 int dm_suspended(struct dm_target *ti) 3525 { 3526 return dm_suspended_md(dm_table_get_md(ti->table)); 3527 } 3528 EXPORT_SYMBOL_GPL(dm_suspended); 3529 3530 int dm_noflush_suspending(struct dm_target *ti) 3531 { 3532 return __noflush_suspending(dm_table_get_md(ti->table)); 3533 } 3534 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 3535 3536 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, 3537 unsigned integrity, unsigned per_io_data_size) 3538 { 3539 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 3540 struct kmem_cache *cachep = NULL; 3541 unsigned int pool_size = 0; 3542 unsigned int front_pad; 3543 3544 if (!pools) 3545 return NULL; 3546 3547 type = filter_md_type(type, md); 3548 3549 switch (type) { 3550 case DM_TYPE_BIO_BASED: 3551 cachep = _io_cache; 3552 pool_size = dm_get_reserved_bio_based_ios(); 3553 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 3554 break; 3555 case DM_TYPE_REQUEST_BASED: 3556 cachep = _rq_tio_cache; 3557 pool_size = dm_get_reserved_rq_based_ios(); 3558 pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); 3559 if (!pools->rq_pool) 3560 goto out; 3561 /* fall through to setup remaining rq-based pools */ 3562 case DM_TYPE_MQ_REQUEST_BASED: 3563 if (!pool_size) 3564 pool_size = dm_get_reserved_rq_based_ios(); 3565 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 3566 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 3567 break; 3568 default: 3569 BUG(); 3570 } 3571 3572 if (cachep) { 3573 pools->io_pool = mempool_create_slab_pool(pool_size, cachep); 3574 if (!pools->io_pool) 3575 goto out; 3576 } 3577 3578 pools->bs = bioset_create_nobvec(pool_size, front_pad); 3579 if (!pools->bs) 3580 goto out; 3581 3582 if (integrity && bioset_integrity_create(pools->bs, pool_size)) 3583 goto out; 3584 3585 return pools; 3586 3587 out: 3588 dm_free_md_mempools(pools); 3589 3590 return NULL; 3591 } 3592 3593 void dm_free_md_mempools(struct dm_md_mempools *pools) 3594 { 3595 if (!pools) 3596 return; 3597 3598 mempool_destroy(pools->io_pool); 3599 mempool_destroy(pools->rq_pool); 3600 3601 if (pools->bs) 3602 bioset_free(pools->bs); 3603 3604 kfree(pools); 3605 } 3606 3607 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3608 u32 flags) 3609 { 3610 struct mapped_device *md = bdev->bd_disk->private_data; 3611 const struct pr_ops *ops; 3612 fmode_t mode; 3613 int r; 3614 3615 r = dm_grab_bdev_for_ioctl(md, &bdev, 
&mode); 3616 if (r < 0) 3617 return r; 3618 3619 ops = bdev->bd_disk->fops->pr_ops; 3620 if (ops && ops->pr_register) 3621 r = ops->pr_register(bdev, old_key, new_key, flags); 3622 else 3623 r = -EOPNOTSUPP; 3624 3625 bdput(bdev); 3626 return r; 3627 } 3628 3629 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3630 u32 flags) 3631 { 3632 struct mapped_device *md = bdev->bd_disk->private_data; 3633 const struct pr_ops *ops; 3634 fmode_t mode; 3635 int r; 3636 3637 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3638 if (r < 0) 3639 return r; 3640 3641 ops = bdev->bd_disk->fops->pr_ops; 3642 if (ops && ops->pr_reserve) 3643 r = ops->pr_reserve(bdev, key, type, flags); 3644 else 3645 r = -EOPNOTSUPP; 3646 3647 bdput(bdev); 3648 return r; 3649 } 3650 3651 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3652 { 3653 struct mapped_device *md = bdev->bd_disk->private_data; 3654 const struct pr_ops *ops; 3655 fmode_t mode; 3656 int r; 3657 3658 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3659 if (r < 0) 3660 return r; 3661 3662 ops = bdev->bd_disk->fops->pr_ops; 3663 if (ops && ops->pr_release) 3664 r = ops->pr_release(bdev, key, type); 3665 else 3666 r = -EOPNOTSUPP; 3667 3668 bdput(bdev); 3669 return r; 3670 } 3671 3672 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3673 enum pr_type type, bool abort) 3674 { 3675 struct mapped_device *md = bdev->bd_disk->private_data; 3676 const struct pr_ops *ops; 3677 fmode_t mode; 3678 int r; 3679 3680 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3681 if (r < 0) 3682 return r; 3683 3684 ops = bdev->bd_disk->fops->pr_ops; 3685 if (ops && ops->pr_preempt) 3686 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 3687 else 3688 r = -EOPNOTSUPP; 3689 3690 bdput(bdev); 3691 return r; 3692 } 3693 3694 static int dm_pr_clear(struct block_device *bdev, u64 key) 3695 { 3696 struct mapped_device *md = bdev->bd_disk->private_data; 3697 const struct pr_ops *ops; 3698 fmode_t mode; 3699 int r; 3700 3701 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3702 if (r < 0) 3703 return r; 3704 3705 ops = bdev->bd_disk->fops->pr_ops; 3706 if (ops && ops->pr_clear) 3707 r = ops->pr_clear(bdev, key); 3708 else 3709 r = -EOPNOTSUPP; 3710 3711 bdput(bdev); 3712 return r; 3713 } 3714 3715 static const struct pr_ops dm_pr_ops = { 3716 .pr_register = dm_pr_register, 3717 .pr_reserve = dm_pr_reserve, 3718 .pr_release = dm_pr_release, 3719 .pr_preempt = dm_pr_preempt, 3720 .pr_clear = dm_pr_clear, 3721 }; 3722 3723 static const struct block_device_operations dm_blk_dops = { 3724 .open = dm_blk_open, 3725 .release = dm_blk_close, 3726 .ioctl = dm_blk_ioctl, 3727 .getgeo = dm_blk_getgeo, 3728 .pr_ops = &dm_pr_ops, 3729 .owner = THIS_MODULE 3730 }; 3731 3732 /* 3733 * module hooks 3734 */ 3735 module_init(dm_init); 3736 module_exit(dm_exit); 3737 3738 module_param(major, uint, 0); 3739 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3740 3741 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3742 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3743 3744 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); 3745 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); 3746 3747 module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR); 3748 MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices"); 3749 3750 module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR); 3751 
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices"); 3752 3753 module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR); 3754 MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices"); 3755 3756 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3757 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3758 3759 MODULE_DESCRIPTION(DM_NAME " driver"); 3760 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3761 MODULE_LICENSE("GPL"); 3762
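/*
 * Illustrative usage note (not part of the driver): when device-mapper is
 * built as a module, the parameters declared above can be set at load time,
 * for example:
 *
 *	modprobe dm_mod use_blk_mq=1 dm_mq_queue_depth=4096
 *
 * and the parameters marked S_IWUSR can be changed at runtime through
 * /sys/module/dm_mod/parameters/.
 */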