/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
 * dm_io into one list, and reuse bio->bi_private as the list head. Before
 * ending this fs bio, we will recover its ->bi_private.
 */
#define REQ_DM_POLL_LIST	REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
	bool submit_as_polled;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!clone_to_tio(bio)->inside_dm_io)
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

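/*
 * Clamp a writable module parameter into [min, max].  Readers sample the
 * parameter with READ_ONCE() and, if it is out of range, write the clamped
 * value back with a best-effort cmpxchg() so later readers see a sane value.
 *
 * Worked example (illustrative numbers only): with dm_numa_node set to 7 on
 * a machine where num_online_nodes() == 2, dm_get_numa_node() below would
 * return 1 and also store 1 back into dm_numa_node.
 */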
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static bool bio_is_flush_with_data(struct bio *bio)
{
	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}

static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
		       unsigned long start_time, struct dm_stats_aux *stats_aux)
{
	bool is_flush_with_data;
	unsigned int bi_size;

	/* If REQ_PREFLUSH set save any payload but do not account it */
	is_flush_with_data = bio_is_flush_with_data(bio);
	if (is_flush_with_data) {
		bi_size = bio->bi_iter.bi_size;
		bio->bi_iter.bi_size = 0;
	}

	if (!end)
		bio_start_io_acct_time(bio, start_time);
	else
		bio_end_io_acct(bio, start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    end, start_time, stats_aux);

	/* Restore bio's payload so it does get accounted upon requeue */
	if (is_flush_with_data)
		bio->bi_iter.bi_size = bi_size;
}

static void __dm_start_io_acct(struct dm_io *io, struct bio *bio)
{
	dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux);
}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
	/* Must account IO to DM device in terms of orig_bio */
	struct bio *bio = io->orig_bio;

	/*
	 * Ensure IO accounting is only ever started once.
	 * Expect no possibility for race unless is_duplicate_bio.
	 */
	if (!clone || likely(!clone_to_tio(clone)->is_duplicate_bio)) {
		if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED)))
			return;
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
	} else {
		unsigned long flags;
		if (dm_io_flagged(io, DM_IO_ACCOUNTED))
			return;
		/* Can afford locking given is_duplicate_bio */
		spin_lock_irqsave(&io->startio_lock, flags);
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
		spin_unlock_irqrestore(&io->startio_lock, flags);
	}

	__dm_start_io_acct(io, bio);
}

static void dm_end_io_acct(struct dm_io *io, struct bio *bio)
{
	dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);

	tio = clone_to_tio(clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	this_cpu_inc(*md->pending_io);
	io->orig_bio = NULL;
	io->md = md;
	io->map_task = current;
	spin_lock_init(&io->startio_lock);
	spin_lock_init(&io->endio_lock);
	io->start_time = jiffies;
	io->flags = 0;

	dm_stats_record_start(&md->stats, &io->stats_aux);

	return io;
}

static void free_io(struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
	struct dm_target_io *tio;
	struct bio *clone;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
		/* alloc_io() already initialized embedded clone */
		clone = &tio->clone;
	} else {
		clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
					gfp_mask, &ci->io->md->bs);
		if (!clone)
			return NULL;

		/* REQ_DM_POLL_LIST shouldn't be inherited */
		clone->bi_opf &= ~REQ_DM_POLL_LIST;

		tio = clone_to_tio(clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->is_duplicate_bio = false;
	tio->len_ptr = len;
	tio->old_sector = 0;

	if (len) {
		clone->bi_iter.bi_size = to_bytes(*len);
		if (bio_integrity(clone))
			bio_integrity_trim(clone);
	}

	return clone;
}

static void free_tio(struct bio *clone)
{
	if (clone_to_tio(clone)->inside_dm_io)
		return;
	bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}
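/*
 * Typical access pattern (illustrative sketch; see dm_submit_bio() and
 * dm_prepare_ioctl() in this file for real callers):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		... the table cannot be swapped or freed until the put ...
 *	dm_put_live_table(md, srcu_idx);
 */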
void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;
	u64 part_off;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

static void dm_io_complete(struct dm_io *io)
{
	blk_status_t io_error;
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	if (io->status == BLK_STS_DM_REQUEUE) {
		unsigned long flags;
		/*
		 * Target requested pushing back the I/O.
		 */
		spin_lock_irqsave(&md->deferred_lock, flags);
		if (__noflush_suspending(md) &&
		    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
			/* NOTE early return due to BLK_STS_DM_REQUEUE below */
			bio_list_add_head(&md->deferred, bio);
		} else {
			/*
			 * noflush suspend was interrupted or this is
			 * a write to a zoned target.
			 */
			io->status = BLK_STS_IOERR;
		}
		spin_unlock_irqrestore(&md->deferred_lock, flags);
	}

	io_error = io->status;
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		dm_end_io_acct(io, bio);
	else if (!io_error) {
		/*
		 * Must handle target that DM_MAPIO_SUBMITTED only to
		 * then bio_endio() rather than dm_submit_bio_remap()
		 */
		__dm_start_io_acct(io, bio);
		dm_end_io_acct(io, bio);
	}
	free_io(io);
	smp_wmb();
	this_cpu_dec(*md->pending_io);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	if (io_error == BLK_STS_DM_REQUEUE) {
		/*
		 * Upper layer won't help us poll split bio, io->orig_bio
		 * may only reflect a subset of the pre-split original,
		 * so clear REQ_POLLED in case of requeue
		 */
		bio->bi_opf &= ~REQ_POLLED;
		return;
	}

	if (bio_is_flush_with_data(bio)) {
		/*
		 * Preflush done for flush with data, reissue
		 * without REQ_PREFLUSH.
		 */
		bio->bi_opf &= ~REQ_PREFLUSH;
		queue_io(md, bio);
	} else {
		/* done with normal IO or empty flush */
		if (io_error)
			bio->bi_status = io_error;
		bio_endio(bio);
	}
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		unsigned long flags;
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE &&
		      __noflush_suspending(io->md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !q->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !q->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (blk_queue_is_zoned(q))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			/*
			 * Requeuing writes to a sequential zone of a zoned
			 * target will break the sequential write pattern:
			 * fail such IO.
			 */
			if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
				error = BLK_STS_IOERR;
			else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(bio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}
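/*
 * Combine the remaining length to the target boundary with any per-target
 * ->max_io_len based splitting.
 *
 * Worked example (illustrative numbers, ignoring the queue's own
 * max_sectors limit): for a target with ti->begin = 0, ti->len = 1024 and
 * ti->max_io_len = 128, an IO at sector 200 has 824 sectors left to the
 * target boundary, but blk_max_size_offset() further caps it at the end of
 * the current 128-sector chunk: 200 is 72 sectors into that chunk, so only
 * 56 sectors are allowed.
 */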
static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}
/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 *    <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(tio->is_duplicate_bio);
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
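/*
 * Usage sketch (hypothetical target, not taken from an in-tree driver):
 * a .map method that can only handle IO up to some internal boundary
 * might do:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned max = sectors_until_boundary(ti, bio);
 *
 *		if (bio_sectors(bio) > max)
 *			dm_accept_partial_bio(bio, max);
 *		bio_set_dev(bio, ...);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * DM core then resubmits the trimmed-off remainder as a new bio (see
 * dm_split_and_process_bio() below).
 */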
static inline void __dm_submit_bio_remap(struct bio *clone,
					 dev_t dev, sector_t old_sector)
{
	trace_block_bio_remap(clone, dev, old_sector);
	submit_bio_noacct(clone);
}

/*
 * @clone: clone bio that DM core passed to target's .map function
 * @tgt_clone: clone of @clone bio that target needs submitted
 *
 * Targets should use this interface to submit bios they take
 * ownership of when returning DM_MAPIO_SUBMITTED.
 *
 * Target should also enable ti->accounts_remapped_io
 */
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_io *io = tio->io;

	WARN_ON_ONCE(!tio->ti->accounts_remapped_io);

	/* establish bio that will get submitted */
	if (!tgt_clone)
		tgt_clone = clone;

	/*
	 * Account io->origin_bio to DM dev on behalf of target
	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
	 */
	if (io->map_task == current) {
		/* Still in target's map function */
		dm_io_set_flag(io, DM_IO_START_ACCT);
	} else {
		/*
		 * Called by another thread, managed by DM target,
		 * wait for dm_split_and_process_bio() to store
		 * io->orig_bio
		 */
		while (unlikely(!smp_load_acquire(&io->orig_bio)))
			msleep(1);
		dm_start_io_acct(io, clone);
	}

	__dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk),
			      tio->old_sector);
}
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);

static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

static void __map_bio(struct bio *clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	int r;
	struct dm_io *io = tio->io;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.
	 */
	dm_io_inc_pending(io);
	tio->old_sector = clone->bi_iter.bi_sector;

	if (unlikely(swap_bios_limit(ti, clone))) {
		struct mapped_device *md = io->md;
		int latch = get_swap_bios();
		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	/*
	 * Check if the IO needs a special mapping due to zone append emulation
	 * on zoned target. In this case, dm_zone_map_bio() calls the target
	 * map operation.
	 */
	if (dm_emulate_zone_append(io->md))
		r = dm_zone_map_bio(tio);
	else
		r = ti->type->map(ti, clone);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* target has assumed ownership of this io */
		if (!ti->accounts_remapped_io)
			dm_io_set_flag(io, DM_IO_START_ACCT);
		break;
	case DM_MAPIO_REMAPPED:
		/*
		 * the bio has been remapped so dispatch it, but defer
		 * dm_start_io_acct() until after possible bio_split().
		 */
		__dm_submit_bio_remap(clone, disk_devt(io->md->disk),
				      tio->old_sector);
		dm_io_set_flag(io, DM_IO_START_ACCT);
		break;
	case DM_MAPIO_KILL:
	case DM_MAPIO_REQUEUE:
		if (unlikely(swap_bios_limit(ti, clone)))
			up(&io->md->swap_bios_semaphore);
		free_tio(clone);
		if (r == DM_MAPIO_KILL)
			dm_io_dec_pending(io, BLK_STS_IOERR);
		else
			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
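/*
 * Allocate num_bios bio clones up front.  The first pass uses GFP_NOWAIT so
 * that a transient bioset shortage never blocks; if any allocation fails,
 * the partial list is returned to the pool and a second pass retries with
 * GFP_NOIO while holding md->table_devices_lock, presumably so that only
 * one thread at a time sleeps on the bioset while already holding some of
 * its bios.
 */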
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios,
				unsigned *len)
{
	struct bio *bio;
	int try;

	for (try = 0; try < 2; try++) {
		int bio_nr;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			bio = alloc_tio(ci, ti, bio_nr, len,
					try ? GFP_NOIO : GFP_NOWAIT);
			if (!bio)
				break;

			bio_list_add(blist, bio);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist)))
			free_tio(bio);
	}
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *clone;

	switch (num_bios) {
	case 0:
		break;
	case 1:
		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
		clone_to_tio(clone)->is_duplicate_bio = true;
		__map_bio(clone);
		break;
	default:
		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
		while ((clone = bio_list_pop(&blist))) {
			clone_to_tio(clone)->is_duplicate_bio = true;
			__map_bio(clone);
		}
		break;
	}
}

static void __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;
	struct bio flush_bio;

	/*
	 * Use an on-stack bio for this, it's safe since we don't
	 * need to reference it after submit. It's just used as
	 * the basis for the clone(s).
	 */
	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);

	ci->bio = &flush_bio;
	ci->sector_count = 0;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	bio_uninit(ci->bio);
}

static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
					unsigned num_bios)
{
	unsigned len;

	len = min_t(sector_t, ci->sector_count,
		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));

	/*
	 * dm_accept_partial_bio cannot be used with duplicate bios,
	 * so update clone_info cursor before __send_duplicate_bios().
	 */
	ci->sector += len;
	ci->sector_count -= len;

	__send_duplicate_bios(ci, ti, num_bios, &len);
}

static bool is_abnormal_io(struct bio *bio)
{
	bool r = false;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		r = true;
		break;
	}

	return r;
}

static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
				  int *result)
{
	unsigned num_bios = 0;

	switch (bio_op(ci->bio)) {
	case REQ_OP_DISCARD:
		num_bios = ti->num_discard_bios;
		break;
	case REQ_OP_SECURE_ERASE:
		num_bios = ti->num_secure_erase_bios;
		break;
	case REQ_OP_WRITE_SAME:
		num_bios = ti->num_write_same_bios;
		break;
	case REQ_OP_WRITE_ZEROES:
		num_bios = ti->num_write_zeroes_bios;
		break;
	default:
		return false;
	}

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (!num_bios)
		*result = -EOPNOTSUPP;
	else {
		__send_changing_extent_only(ci, ti, num_bios);
		*result = 0;
	}
	return true;
}

/*
 * Reuse ->bi_private as hlist head for storing all dm_io instances
 * associated with this bio, and this bio's bi_private needs to be
 * stored in dm_io->data before the reuse.
 *
 * bio->bi_private is owned by fs or upper layer, so block layer won't
 * touch it after splitting. Meantime it won't be changed by anyone after
 * bio is submitted. So this reuse is safe.
 */
static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
{
	return (struct hlist_head *)&bio->bi_private;
}

static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{
	struct hlist_head *head = dm_get_bio_hlist_head(bio);

	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/*
		 * Save .bi_private into dm_io, so that we can reuse
		 * .bi_private as hlist head for storing dm_io list
		 */
		io->data = bio->bi_private;

		INIT_HLIST_HEAD(head);

		/* tell block layer to poll for completion */
		bio->bi_cookie = ~BLK_QC_T_NONE;
	} else {
		/*
		 * bio recursed due to split, reuse original poll list,
		 * and save bio->bi_private too.
		 */
		io->data = hlist_entry(head->first, struct dm_io, node)->data;
	}

	hlist_add_head(&io->node, head);
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_bio(struct clone_info *ci)
{
	struct bio *clone;
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	if (__process_abnormal_io(ci, ti, &r))
		return r;

	/*
	 * Only support bio polling for normal IO, and the target io is
	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
	 */
	ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;

	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
	__map_bio(clone);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->bio = bio;
	ci->submit_as_polled = false;
	ci->sector = bio->bi_iter.bi_sector;
	ci->sector_count = bio_sectors(bio);

	/* Shouldn't happen but sector_count was being set to 0 so... */
	if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
		ci->sector_count = 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void dm_split_and_process_bio(struct mapped_device *md,
				     struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	struct bio *orig_bio = NULL;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		__send_empty_flush(&ci);
		/* dm_io_complete submits any data associated with flush */
		goto out;
	}

	error = __split_and_process_bio(&ci);
	ci.io->map_task = NULL;
	if (error || !ci.sector_count)
		goto out;

	/*
	 * Remainder must be passed to submit_bio_noacct() so it gets handled
	 * *after* bios already submitted have been completely processed.
	 * We take a clone of the original to store in ci.io->orig_bio to be
	 * used by dm_end_io_acct() and for dm_io_complete() to use for
	 * completion handling.
	 */
	orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count,
			     GFP_NOIO, &md->queue->bio_split);
	bio_chain(orig_bio, bio);
	trace_block_split(orig_bio, bio->bi_iter.bi_sector);
	submit_bio_noacct(bio);
out:
	if (!orig_bio)
		orig_bio = bio;
	smp_store_release(&ci.io->orig_bio, orig_bio);
	if (dm_io_flagged(ci.io, DM_IO_START_ACCT))
		dm_start_io_acct(ci.io, NULL);

	/*
	 * Drop the extra reference count for non-POLLED bio, and hold one
	 * reference for POLLED bio, which will be released in dm_poll_bio
	 *
	 * Add every dm_io instance into the hlist_head which is stored in
	 * bio->bi_private, so that dm_poll_bio can poll them all.
	 */
	if (error || !ci.submit_as_polled)
		dm_io_dec_pending(ci.io, errno_to_blk_status(error));
	else
		dm_queue_poll_io(bio, ci.io);
}

static void dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	/* If suspended, or map not yet available, queue this IO for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
	    unlikely(!map)) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);
		else
			queue_io(md, bio);
		goto out;
	}

	/*
	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
	 * otherwise associated queue_limits won't be imposed.
	 */
	if (is_abnormal_io(bio))
		blk_queue_split(&bio);

	dm_split_and_process_bio(md, map, bio);
out:
	dm_put_live_table(md, srcu_idx);
}

static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
			  unsigned int flags)
{
	WARN_ON_ONCE(!io->tio.inside_dm_io);

	/* don't poll if the mapped io is done */
	if (atomic_read(&io->io_count) > 1)
		bio_poll(&io->tio.clone, iob, flags);

	/* bio_poll holds the last reference */
	return atomic_read(&io->io_count) == 1;
}

static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
		       unsigned int flags)
{
	struct hlist_head *head = dm_get_bio_hlist_head(bio);
	struct hlist_head tmp = HLIST_HEAD_INIT;
	struct hlist_node *next;
	struct dm_io *io;

	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
		return 0;

	WARN_ON_ONCE(hlist_empty(head));

	hlist_move_list(head, &tmp);

	/*
	 * Restore .bi_private before possibly completing dm_io.
	 *
	 * bio_poll() is only possible once @bio has been completely
	 * submitted via submit_bio_noacct()'s depth-first submission.
	 * So there is no dm_queue_poll_io() race associated with
	 * clearing REQ_DM_POLL_LIST here.
	 */
	bio->bi_opf &= ~REQ_DM_POLL_LIST;
	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;

	hlist_for_each_entry_safe(io, next, &tmp, node) {
		if (dm_poll_dm_io(io, iob, flags)) {
			hlist_del_init(&io->node);
			/*
			 * clone_endio() has already occurred, so passing
			 * error as 0 here doesn't override io->status
			 */
			dm_io_dec_pending(io, 0);
		}
	}

	/* Not done? */
	if (!hlist_empty(&tmp)) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/* Reset bio->bi_private to dm_io list head */
		hlist_move_list(&tmp, head);
		return 0;
	}
	return 1;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
	dm_destroy_crypto_profile(q->crypto_profile);
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

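/*
 * Tear down the resources set up by alloc_dev() (below).  This is also used
 * on the alloc_dev() error path, so each step checks whether its resource
 * was actually created before freeing it.
 */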
static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);

	if (md->dax_dev) {
		dax_remove_host(md->disk);
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	dm_cleanup_zoned_dev(md);
	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		if (dm_get_md_type(md) != DM_TYPE_NONE) {
			dm_sysfs_exit(md);
			del_gendisk(md->disk);
		}
		dm_queue_destroy_crypto_profile(md->queue);
		blk_cleanup_disk(md->disk);
	}

	if (md->pending_io) {
		free_percpu(md->pending_io);
		md->pending_io = NULL;
	}

	cleanup_srcu_struct(&md->io_barrier);

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);
	mutex_destroy(&md->swap_bios_lock);

	dm_mq_cleanup_mapped_device(md);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	/*
	 * default to bio-based until DM table is loaded and md->type
	 * established. If request-based table is loaded: blk-mq will
	 * override accordingly.
	 */
	md->disk = blk_alloc_disk(md->numa_node_id);
	if (!md->disk)
		goto bad;
	md->queue = md->disk->queue;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->swap_bios = get_swap_bios();
	sema_init(&md->swap_bios_semaphore, md->swap_bios);
	mutex_init(&md->swap_bios_lock);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->minors = 1;
	md->disk->flags |= GENHD_FL_NO_PART;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_FS_DAX)) {
		md->dax_dev = alloc_dax(md, &dm_dax_ops);
		if (IS_ERR(md->dax_dev)) {
			md->dax_dev = NULL;
			goto bad;
		}
		set_dax_nocache(md->dax_dev);
		set_dax_nomc(md->dax_dev);
		if (dax_add_host(md->dax_dev, md->disk))
			goto bad;
	}

	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
	if (!md->wq)
		goto bad;

	md->pending_io = alloc_percpu(unsigned long);
	if (!md->pending_io)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (!get_capacity(md->disk))
		set_capacity(md->disk, size);
	else
		set_capacity_and_notify(md->disk, size);

	dm_table_event_callback(t, event_callback, md);

	if (dm_table_request_based(t)) {
		/*
		 * Leverage the fact that request-based DM targets are
		 * immutable singletons - used to optimize dm_mq_queue_rq.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	ret = dm_table_set_restrictions(t, md->queue, limits);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	if (old_map)
		dm_sync_table(md);
out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_ima_reset_data(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
2140 */ 2141 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2142 { 2143 BUG_ON(!atomic_read(&md->holders)); 2144 return &md->queue->limits; 2145 } 2146 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2147 2148 /* 2149 * Setup the DM device's queue based on md's type 2150 */ 2151 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2152 { 2153 enum dm_queue_mode type = dm_table_get_type(t); 2154 struct queue_limits limits; 2155 int r; 2156 2157 switch (type) { 2158 case DM_TYPE_REQUEST_BASED: 2159 md->disk->fops = &dm_rq_blk_dops; 2160 r = dm_mq_init_request_queue(md, t); 2161 if (r) { 2162 DMERR("Cannot initialize queue for request-based dm mapped device"); 2163 return r; 2164 } 2165 break; 2166 case DM_TYPE_BIO_BASED: 2167 case DM_TYPE_DAX_BIO_BASED: 2168 break; 2169 case DM_TYPE_NONE: 2170 WARN_ON_ONCE(true); 2171 break; 2172 } 2173 2174 r = dm_calculate_queue_limits(t, &limits); 2175 if (r) { 2176 DMERR("Cannot calculate initial queue limits"); 2177 return r; 2178 } 2179 r = dm_table_set_restrictions(t, md->queue, &limits); 2180 if (r) 2181 return r; 2182 2183 r = add_disk(md->disk); 2184 if (r) 2185 return r; 2186 2187 r = dm_sysfs_init(md); 2188 if (r) { 2189 del_gendisk(md->disk); 2190 return r; 2191 } 2192 md->type = type; 2193 return 0; 2194 } 2195 2196 struct mapped_device *dm_get_md(dev_t dev) 2197 { 2198 struct mapped_device *md; 2199 unsigned minor = MINOR(dev); 2200 2201 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2202 return NULL; 2203 2204 spin_lock(&_minor_lock); 2205 2206 md = idr_find(&_minor_idr, minor); 2207 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2208 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2209 md = NULL; 2210 goto out; 2211 } 2212 dm_get(md); 2213 out: 2214 spin_unlock(&_minor_lock); 2215 2216 return md; 2217 } 2218 EXPORT_SYMBOL_GPL(dm_get_md); 2219 2220 void *dm_get_mdptr(struct mapped_device *md) 2221 { 2222 return md->interface_ptr; 2223 } 2224 2225 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2226 { 2227 md->interface_ptr = ptr; 2228 } 2229 2230 void dm_get(struct mapped_device *md) 2231 { 2232 atomic_inc(&md->holders); 2233 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2234 } 2235 2236 int dm_hold(struct mapped_device *md) 2237 { 2238 spin_lock(&_minor_lock); 2239 if (test_bit(DMF_FREEING, &md->flags)) { 2240 spin_unlock(&_minor_lock); 2241 return -EBUSY; 2242 } 2243 dm_get(md); 2244 spin_unlock(&_minor_lock); 2245 return 0; 2246 } 2247 EXPORT_SYMBOL_GPL(dm_hold); 2248 2249 const char *dm_device_name(struct mapped_device *md) 2250 { 2251 return md->name; 2252 } 2253 EXPORT_SYMBOL_GPL(dm_device_name); 2254 2255 static void __dm_destroy(struct mapped_device *md, bool wait) 2256 { 2257 struct dm_table *map; 2258 int srcu_idx; 2259 2260 might_sleep(); 2261 2262 spin_lock(&_minor_lock); 2263 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2264 set_bit(DMF_FREEING, &md->flags); 2265 spin_unlock(&_minor_lock); 2266 2267 blk_set_queue_dying(md->queue); 2268 2269 /* 2270 * Take suspend_lock so that presuspend and postsuspend methods 2271 * do not race with internal suspend. 
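 *
 * (Taking it also serializes against the internal suspend helpers below,
 * which invoke the same presuspend/postsuspend hooks under md->suspend_lock.
 * Meanwhile the DMF_FREEING + MINOR_ALLOCED handover above guarantees that
 * concurrent lookups can no longer take a new reference:
 *
 *	dm_get_md(dev)	now returns NULL
 *	dm_hold(md)	now returns -EBUSY
 *
 * so md->holders can only fall while the wait loop below runs.)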
2272 */ 2273 mutex_lock(&md->suspend_lock); 2274 map = dm_get_live_table(md, &srcu_idx); 2275 if (!dm_suspended_md(md)) { 2276 dm_table_presuspend_targets(map); 2277 set_bit(DMF_SUSPENDED, &md->flags); 2278 set_bit(DMF_POST_SUSPENDING, &md->flags); 2279 dm_table_postsuspend_targets(map); 2280 } 2281 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2282 dm_put_live_table(md, srcu_idx); 2283 mutex_unlock(&md->suspend_lock); 2284 2285 /* 2286 * Rare, but there may be I/O requests still going to complete, 2287 * for example. Wait for all references to disappear. 2288 * No one should increment the reference count of the mapped_device, 2289 * after the mapped_device state becomes DMF_FREEING. 2290 */ 2291 if (wait) 2292 while (atomic_read(&md->holders)) 2293 msleep(1); 2294 else if (atomic_read(&md->holders)) 2295 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2296 dm_device_name(md), atomic_read(&md->holders)); 2297 2298 dm_table_destroy(__unbind(md)); 2299 free_dev(md); 2300 } 2301 2302 void dm_destroy(struct mapped_device *md) 2303 { 2304 __dm_destroy(md, true); 2305 } 2306 2307 void dm_destroy_immediate(struct mapped_device *md) 2308 { 2309 __dm_destroy(md, false); 2310 } 2311 2312 void dm_put(struct mapped_device *md) 2313 { 2314 atomic_dec(&md->holders); 2315 } 2316 EXPORT_SYMBOL_GPL(dm_put); 2317 2318 static bool dm_in_flight_bios(struct mapped_device *md) 2319 { 2320 int cpu; 2321 unsigned long sum = 0; 2322 2323 for_each_possible_cpu(cpu) 2324 sum += *per_cpu_ptr(md->pending_io, cpu); 2325 2326 return sum != 0; 2327 } 2328 2329 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 2330 { 2331 int r = 0; 2332 DEFINE_WAIT(wait); 2333 2334 while (true) { 2335 prepare_to_wait(&md->wait, &wait, task_state); 2336 2337 if (!dm_in_flight_bios(md)) 2338 break; 2339 2340 if (signal_pending_state(task_state, current)) { 2341 r = -EINTR; 2342 break; 2343 } 2344 2345 io_schedule(); 2346 } 2347 finish_wait(&md->wait, &wait); 2348 2349 smp_rmb(); 2350 2351 return r; 2352 } 2353 2354 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 2355 { 2356 int r = 0; 2357 2358 if (!queue_is_mq(md->queue)) 2359 return dm_wait_for_bios_completion(md, task_state); 2360 2361 while (true) { 2362 if (!blk_mq_queue_inflight(md->queue)) 2363 break; 2364 2365 if (signal_pending_state(task_state, current)) { 2366 r = -EINTR; 2367 break; 2368 } 2369 2370 msleep(5); 2371 } 2372 2373 return r; 2374 } 2375 2376 /* 2377 * Process the deferred bios 2378 */ 2379 static void dm_wq_work(struct work_struct *work) 2380 { 2381 struct mapped_device *md = container_of(work, struct mapped_device, work); 2382 struct bio *bio; 2383 2384 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2385 spin_lock_irq(&md->deferred_lock); 2386 bio = bio_list_pop(&md->deferred); 2387 spin_unlock_irq(&md->deferred_lock); 2388 2389 if (!bio) 2390 break; 2391 2392 submit_bio_noacct(bio); 2393 } 2394 } 2395 2396 static void dm_queue_flush(struct mapped_device *md) 2397 { 2398 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2399 smp_mb__after_atomic(); 2400 queue_work(md->wq, &md->work); 2401 } 2402 2403 /* 2404 * Swap in a new table, returning the old one for the caller to destroy. 
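 *
 * A rough sketch of the caller-side contract (loosely what the dm-ioctl
 * resume path does, simplified; 'new_table' is a stand-in name).  The
 * device must already be suspended, and the return value is either an
 * ERR_PTR, NULL, or the previous table to destroy:
 *
 *	old_map = dm_swap_table(md, new_table);
 *	if (IS_ERR(old_map))
 *		return PTR_ERR(old_map);
 *	if (old_map)
 *		dm_table_destroy(old_map);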
2405 */ 2406 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2407 { 2408 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2409 struct queue_limits limits; 2410 int r; 2411 2412 mutex_lock(&md->suspend_lock); 2413 2414 /* device must be suspended */ 2415 if (!dm_suspended_md(md)) 2416 goto out; 2417 2418 /* 2419 * If the new table has no data devices, retain the existing limits. 2420 * This helps multipath with queue_if_no_path if all paths disappear, 2421 * then new I/O is queued based on these limits, and then some paths 2422 * reappear. 2423 */ 2424 if (dm_table_has_no_data_devices(table)) { 2425 live_map = dm_get_live_table_fast(md); 2426 if (live_map) 2427 limits = md->queue->limits; 2428 dm_put_live_table_fast(md); 2429 } 2430 2431 if (!live_map) { 2432 r = dm_calculate_queue_limits(table, &limits); 2433 if (r) { 2434 map = ERR_PTR(r); 2435 goto out; 2436 } 2437 } 2438 2439 map = __bind(md, table, &limits); 2440 dm_issue_global_event(); 2441 2442 out: 2443 mutex_unlock(&md->suspend_lock); 2444 return map; 2445 } 2446 2447 /* 2448 * Functions to lock and unlock any filesystem running on the 2449 * device. 2450 */ 2451 static int lock_fs(struct mapped_device *md) 2452 { 2453 int r; 2454 2455 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2456 2457 r = freeze_bdev(md->disk->part0); 2458 if (!r) 2459 set_bit(DMF_FROZEN, &md->flags); 2460 return r; 2461 } 2462 2463 static void unlock_fs(struct mapped_device *md) 2464 { 2465 if (!test_bit(DMF_FROZEN, &md->flags)) 2466 return; 2467 thaw_bdev(md->disk->part0); 2468 clear_bit(DMF_FROZEN, &md->flags); 2469 } 2470 2471 /* 2472 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2473 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2474 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2475 * 2476 * If __dm_suspend returns 0, the device is completely quiescent 2477 * now. There is no request-processing activity. All new requests 2478 * are being added to md->deferred list. 2479 */ 2480 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2481 unsigned suspend_flags, unsigned int task_state, 2482 int dmf_suspended_flag) 2483 { 2484 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2485 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2486 int r; 2487 2488 lockdep_assert_held(&md->suspend_lock); 2489 2490 /* 2491 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2492 * This flag is cleared before dm_suspend returns. 2493 */ 2494 if (noflush) 2495 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2496 else 2497 DMDEBUG("%s: suspending with flush", dm_device_name(md)); 2498 2499 /* 2500 * This gets reverted if there's an error later and the targets 2501 * provide the .presuspend_undo hook. 2502 */ 2503 dm_table_presuspend_targets(map); 2504 2505 /* 2506 * Flush I/O to the device. 2507 * Any I/O submitted after lock_fs() may not be flushed. 2508 * noflush takes precedence over do_lockfs. 2509 * (lock_fs() flushes I/Os and waits for them to complete.) 2510 */ 2511 if (!noflush && do_lockfs) { 2512 r = lock_fs(md); 2513 if (r) { 2514 dm_table_presuspend_undo_targets(map); 2515 return r; 2516 } 2517 } 2518 2519 /* 2520 * Here we must make sure that no processes are submitting requests 2521 * to target drivers i.e. no one may be executing 2522 * dm_split_and_process_bio from dm_submit_bio. 2523 * 2524 * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 2525 * we take the write lock. 
To prevent any process from reentering 2526 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread 2527 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 2528 * flush_workqueue(md->wq). 2529 */ 2530 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2531 if (map) 2532 synchronize_srcu(&md->io_barrier); 2533 2534 /* 2535 * Stop md->queue before flushing md->wq in case request-based 2536 * dm defers requests to md->wq from md->queue. 2537 */ 2538 if (dm_request_based(md)) 2539 dm_stop_queue(md->queue); 2540 2541 flush_workqueue(md->wq); 2542 2543 /* 2544 * At this point no more requests are entering target request routines. 2545 * We call dm_wait_for_completion to wait for all existing requests 2546 * to finish. 2547 */ 2548 r = dm_wait_for_completion(md, task_state); 2549 if (!r) 2550 set_bit(dmf_suspended_flag, &md->flags); 2551 2552 if (noflush) 2553 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2554 if (map) 2555 synchronize_srcu(&md->io_barrier); 2556 2557 /* were we interrupted ? */ 2558 if (r < 0) { 2559 dm_queue_flush(md); 2560 2561 if (dm_request_based(md)) 2562 dm_start_queue(md->queue); 2563 2564 unlock_fs(md); 2565 dm_table_presuspend_undo_targets(map); 2566 /* pushback list is already flushed, so skip flush */ 2567 } 2568 2569 return r; 2570 } 2571 2572 /* 2573 * We need to be able to change a mapping table under a mounted 2574 * filesystem. For example we might want to move some data in 2575 * the background. Before the table can be swapped with 2576 * dm_bind_table, dm_suspend must be called to flush any in 2577 * flight bios and ensure that any further io gets deferred. 2578 */ 2579 /* 2580 * Suspend mechanism in request-based dm. 2581 * 2582 * 1. Flush all I/Os by lock_fs() if needed. 2583 * 2. Stop dispatching any I/O by stopping the request_queue. 2584 * 3. Wait for all in-flight I/Os to be completed or requeued. 2585 * 2586 * To abort suspend, start the request_queue. 2587 */ 2588 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2589 { 2590 struct dm_table *map = NULL; 2591 int r = 0; 2592 2593 retry: 2594 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2595 2596 if (dm_suspended_md(md)) { 2597 r = -EINVAL; 2598 goto out_unlock; 2599 } 2600 2601 if (dm_suspended_internally_md(md)) { 2602 /* already internally suspended, wait for internal resume */ 2603 mutex_unlock(&md->suspend_lock); 2604 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2605 if (r) 2606 return r; 2607 goto retry; 2608 } 2609 2610 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2611 2612 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2613 if (r) 2614 goto out_unlock; 2615 2616 set_bit(DMF_POST_SUSPENDING, &md->flags); 2617 dm_table_postsuspend_targets(map); 2618 clear_bit(DMF_POST_SUSPENDING, &md->flags); 2619 2620 out_unlock: 2621 mutex_unlock(&md->suspend_lock); 2622 return r; 2623 } 2624 2625 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 2626 { 2627 if (map) { 2628 int r = dm_table_resume_targets(map); 2629 if (r) 2630 return r; 2631 } 2632 2633 dm_queue_flush(md); 2634 2635 /* 2636 * Flushing deferred I/Os must be done after targets are resumed 2637 * so that mapping of targets can work correctly. 2638 * Request-based dm is queueing the deferred I/Os in its request_queue. 
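 *
 * Put differently: resume targets first (done above), then dm_queue_flush()
 * releases the bios parked on md->deferred, and only then is the
 * request_queue restarted below.  A hedged sketch of the usual caller-side
 * bracket around a table reload ('new_table' is a stand-in name):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	dm_resume(md);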
2639 */ 2640 if (dm_request_based(md)) 2641 dm_start_queue(md->queue); 2642 2643 unlock_fs(md); 2644 2645 return 0; 2646 } 2647 2648 int dm_resume(struct mapped_device *md) 2649 { 2650 int r; 2651 struct dm_table *map = NULL; 2652 2653 retry: 2654 r = -EINVAL; 2655 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2656 2657 if (!dm_suspended_md(md)) 2658 goto out; 2659 2660 if (dm_suspended_internally_md(md)) { 2661 /* already internally suspended, wait for internal resume */ 2662 mutex_unlock(&md->suspend_lock); 2663 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2664 if (r) 2665 return r; 2666 goto retry; 2667 } 2668 2669 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2670 if (!map || !dm_table_get_size(map)) 2671 goto out; 2672 2673 r = __dm_resume(md, map); 2674 if (r) 2675 goto out; 2676 2677 clear_bit(DMF_SUSPENDED, &md->flags); 2678 out: 2679 mutex_unlock(&md->suspend_lock); 2680 2681 return r; 2682 } 2683 2684 /* 2685 * Internal suspend/resume works like userspace-driven suspend. It waits 2686 * until all bios finish and prevents issuing new bios to the target drivers. 2687 * It may be used only from the kernel. 2688 */ 2689 2690 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2691 { 2692 struct dm_table *map = NULL; 2693 2694 lockdep_assert_held(&md->suspend_lock); 2695 2696 if (md->internal_suspend_count++) 2697 return; /* nested internal suspend */ 2698 2699 if (dm_suspended_md(md)) { 2700 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2701 return; /* nest suspend */ 2702 } 2703 2704 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2705 2706 /* 2707 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2708 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2709 * would require changing .presuspend to return an error -- avoid this 2710 * until there is a need for more elaborate variants of internal suspend. 
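 *
 * Hedged usage sketch for the exported wrappers further down (this roughly
 * mirrors how a target such as dm-thin briefly quiesces devices around
 * metadata-wide operations; nesting is allowed and is paired by
 * md->internal_suspend_count):
 *
 *	dm_internal_suspend_noflush(md);
 *	... touch state that must not race with I/O ...
 *	dm_internal_resume(md);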
2711 */ 2712 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2713 DMF_SUSPENDED_INTERNALLY); 2714 2715 set_bit(DMF_POST_SUSPENDING, &md->flags); 2716 dm_table_postsuspend_targets(map); 2717 clear_bit(DMF_POST_SUSPENDING, &md->flags); 2718 } 2719 2720 static void __dm_internal_resume(struct mapped_device *md) 2721 { 2722 BUG_ON(!md->internal_suspend_count); 2723 2724 if (--md->internal_suspend_count) 2725 return; /* resume from nested internal suspend */ 2726 2727 if (dm_suspended_md(md)) 2728 goto done; /* resume from nested suspend */ 2729 2730 /* 2731 * NOTE: existing callers don't need to call dm_table_resume_targets 2732 * (which may fail -- so best to avoid it for now by passing NULL map) 2733 */ 2734 (void) __dm_resume(md, NULL); 2735 2736 done: 2737 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2738 smp_mb__after_atomic(); 2739 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2740 } 2741 2742 void dm_internal_suspend_noflush(struct mapped_device *md) 2743 { 2744 mutex_lock(&md->suspend_lock); 2745 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2746 mutex_unlock(&md->suspend_lock); 2747 } 2748 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2749 2750 void dm_internal_resume(struct mapped_device *md) 2751 { 2752 mutex_lock(&md->suspend_lock); 2753 __dm_internal_resume(md); 2754 mutex_unlock(&md->suspend_lock); 2755 } 2756 EXPORT_SYMBOL_GPL(dm_internal_resume); 2757 2758 /* 2759 * Fast variants of internal suspend/resume hold md->suspend_lock, 2760 * which prevents interaction with userspace-driven suspend. 2761 */ 2762 2763 void dm_internal_suspend_fast(struct mapped_device *md) 2764 { 2765 mutex_lock(&md->suspend_lock); 2766 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2767 return; 2768 2769 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2770 synchronize_srcu(&md->io_barrier); 2771 flush_workqueue(md->wq); 2772 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2773 } 2774 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2775 2776 void dm_internal_resume_fast(struct mapped_device *md) 2777 { 2778 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2779 goto done; 2780 2781 dm_queue_flush(md); 2782 2783 done: 2784 mutex_unlock(&md->suspend_lock); 2785 } 2786 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2787 2788 /*----------------------------------------------------------------- 2789 * Event notification. 
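 *
 * Hedged usage sketch (roughly what the DM_DEV_WAIT ioctl path does):
 * sample the counter, hand it to userspace, and later block until a newer
 * event arrives:
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *	...
 *	if (dm_wait_event(md, ev))
 *		return -ERESTARTSYS;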
2790 *---------------------------------------------------------------*/ 2791 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2792 unsigned cookie) 2793 { 2794 int r; 2795 unsigned noio_flag; 2796 char udev_cookie[DM_COOKIE_LENGTH]; 2797 char *envp[] = { udev_cookie, NULL }; 2798 2799 noio_flag = memalloc_noio_save(); 2800 2801 if (!cookie) 2802 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2803 else { 2804 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 2805 DM_COOKIE_ENV_VAR_NAME, cookie); 2806 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2807 action, envp); 2808 } 2809 2810 memalloc_noio_restore(noio_flag); 2811 2812 return r; 2813 } 2814 2815 uint32_t dm_next_uevent_seq(struct mapped_device *md) 2816 { 2817 return atomic_add_return(1, &md->uevent_seq); 2818 } 2819 2820 uint32_t dm_get_event_nr(struct mapped_device *md) 2821 { 2822 return atomic_read(&md->event_nr); 2823 } 2824 2825 int dm_wait_event(struct mapped_device *md, int event_nr) 2826 { 2827 return wait_event_interruptible(md->eventq, 2828 (event_nr != atomic_read(&md->event_nr))); 2829 } 2830 2831 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 2832 { 2833 unsigned long flags; 2834 2835 spin_lock_irqsave(&md->uevent_lock, flags); 2836 list_add(elist, &md->uevent_list); 2837 spin_unlock_irqrestore(&md->uevent_lock, flags); 2838 } 2839 2840 /* 2841 * The gendisk is only valid as long as you have a reference 2842 * count on 'md'. 2843 */ 2844 struct gendisk *dm_disk(struct mapped_device *md) 2845 { 2846 return md->disk; 2847 } 2848 EXPORT_SYMBOL_GPL(dm_disk); 2849 2850 struct kobject *dm_kobject(struct mapped_device *md) 2851 { 2852 return &md->kobj_holder.kobj; 2853 } 2854 2855 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2856 { 2857 struct mapped_device *md; 2858 2859 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2860 2861 spin_lock(&_minor_lock); 2862 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2863 md = NULL; 2864 goto out; 2865 } 2866 dm_get(md); 2867 out: 2868 spin_unlock(&_minor_lock); 2869 2870 return md; 2871 } 2872 2873 int dm_suspended_md(struct mapped_device *md) 2874 { 2875 return test_bit(DMF_SUSPENDED, &md->flags); 2876 } 2877 2878 static int dm_post_suspending_md(struct mapped_device *md) 2879 { 2880 return test_bit(DMF_POST_SUSPENDING, &md->flags); 2881 } 2882 2883 int dm_suspended_internally_md(struct mapped_device *md) 2884 { 2885 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2886 } 2887 2888 int dm_test_deferred_remove_flag(struct mapped_device *md) 2889 { 2890 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 2891 } 2892 2893 int dm_suspended(struct dm_target *ti) 2894 { 2895 return dm_suspended_md(ti->table->md); 2896 } 2897 EXPORT_SYMBOL_GPL(dm_suspended); 2898 2899 int dm_post_suspending(struct dm_target *ti) 2900 { 2901 return dm_post_suspending_md(ti->table->md); 2902 } 2903 EXPORT_SYMBOL_GPL(dm_post_suspending); 2904 2905 int dm_noflush_suspending(struct dm_target *ti) 2906 { 2907 return __noflush_suspending(ti->table->md); 2908 } 2909 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2910 2911 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 2912 unsigned integrity, unsigned per_io_data_size, 2913 unsigned min_pool_size) 2914 { 2915 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 2916 unsigned int pool_size = 0; 2917 unsigned int front_pad, io_front_pad; 2918 int ret; 2919 2920 if 
(!pools) 2921 return NULL; 2922 2923 switch (type) { 2924 case DM_TYPE_BIO_BASED: 2925 case DM_TYPE_DAX_BIO_BASED: 2926 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 2927 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; 2928 io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; 2929 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 2930 if (ret) 2931 goto out; 2932 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 2933 goto out; 2934 break; 2935 case DM_TYPE_REQUEST_BASED: 2936 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 2937 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2938 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 2939 break; 2940 default: 2941 BUG(); 2942 } 2943 2944 ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 2945 if (ret) 2946 goto out; 2947 2948 if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 2949 goto out; 2950 2951 return pools; 2952 2953 out: 2954 dm_free_md_mempools(pools); 2955 2956 return NULL; 2957 } 2958 2959 void dm_free_md_mempools(struct dm_md_mempools *pools) 2960 { 2961 if (!pools) 2962 return; 2963 2964 bioset_exit(&pools->bs); 2965 bioset_exit(&pools->io_bs); 2966 2967 kfree(pools); 2968 } 2969 2970 struct dm_pr { 2971 u64 old_key; 2972 u64 new_key; 2973 u32 flags; 2974 bool fail_early; 2975 }; 2976 2977 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 2978 void *data) 2979 { 2980 struct mapped_device *md = bdev->bd_disk->private_data; 2981 struct dm_table *table; 2982 struct dm_target *ti; 2983 int ret = -ENOTTY, srcu_idx; 2984 2985 table = dm_get_live_table(md, &srcu_idx); 2986 if (!table || !dm_table_get_size(table)) 2987 goto out; 2988 2989 /* We only support devices that have a single target */ 2990 if (dm_table_get_num_targets(table) != 1) 2991 goto out; 2992 ti = dm_table_get_target(table, 0); 2993 2994 ret = -EINVAL; 2995 if (!ti->type->iterate_devices) 2996 goto out; 2997 2998 ret = ti->type->iterate_devices(ti, fn, data); 2999 out: 3000 dm_put_live_table(md, srcu_idx); 3001 return ret; 3002 } 3003 3004 /* 3005 * For register / unregister we need to manually call out to every path. 
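 *
 * dm_pr_register() below therefore fans out over every underlying device
 * via dm_call_pr(), and on partial failure re-runs itself to unregister
 * the new key from the paths that did succeed.  From userspace all of this
 * is reached through the generic persistent-reservation ioctls, e.g.
 * (illustrative, values made up):
 *
 *	struct pr_registration reg = { .new_key = 0x1234, .flags = 0 };
 *	ioctl(fd, IOC_PR_REGISTER, &reg);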
3006 */ 3007 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 3008 sector_t start, sector_t len, void *data) 3009 { 3010 struct dm_pr *pr = data; 3011 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3012 3013 if (!ops || !ops->pr_register) 3014 return -EOPNOTSUPP; 3015 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 3016 } 3017 3018 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3019 u32 flags) 3020 { 3021 struct dm_pr pr = { 3022 .old_key = old_key, 3023 .new_key = new_key, 3024 .flags = flags, 3025 .fail_early = true, 3026 }; 3027 int ret; 3028 3029 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 3030 if (ret && new_key) { 3031 /* unregister all paths if we failed to register any path */ 3032 pr.old_key = new_key; 3033 pr.new_key = 0; 3034 pr.flags = 0; 3035 pr.fail_early = false; 3036 dm_call_pr(bdev, __dm_pr_register, &pr); 3037 } 3038 3039 return ret; 3040 } 3041 3042 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3043 u32 flags) 3044 { 3045 struct mapped_device *md = bdev->bd_disk->private_data; 3046 const struct pr_ops *ops; 3047 int r, srcu_idx; 3048 3049 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3050 if (r < 0) 3051 goto out; 3052 3053 ops = bdev->bd_disk->fops->pr_ops; 3054 if (ops && ops->pr_reserve) 3055 r = ops->pr_reserve(bdev, key, type, flags); 3056 else 3057 r = -EOPNOTSUPP; 3058 out: 3059 dm_unprepare_ioctl(md, srcu_idx); 3060 return r; 3061 } 3062 3063 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3064 { 3065 struct mapped_device *md = bdev->bd_disk->private_data; 3066 const struct pr_ops *ops; 3067 int r, srcu_idx; 3068 3069 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3070 if (r < 0) 3071 goto out; 3072 3073 ops = bdev->bd_disk->fops->pr_ops; 3074 if (ops && ops->pr_release) 3075 r = ops->pr_release(bdev, key, type); 3076 else 3077 r = -EOPNOTSUPP; 3078 out: 3079 dm_unprepare_ioctl(md, srcu_idx); 3080 return r; 3081 } 3082 3083 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3084 enum pr_type type, bool abort) 3085 { 3086 struct mapped_device *md = bdev->bd_disk->private_data; 3087 const struct pr_ops *ops; 3088 int r, srcu_idx; 3089 3090 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3091 if (r < 0) 3092 goto out; 3093 3094 ops = bdev->bd_disk->fops->pr_ops; 3095 if (ops && ops->pr_preempt) 3096 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 3097 else 3098 r = -EOPNOTSUPP; 3099 out: 3100 dm_unprepare_ioctl(md, srcu_idx); 3101 return r; 3102 } 3103 3104 static int dm_pr_clear(struct block_device *bdev, u64 key) 3105 { 3106 struct mapped_device *md = bdev->bd_disk->private_data; 3107 const struct pr_ops *ops; 3108 int r, srcu_idx; 3109 3110 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3111 if (r < 0) 3112 goto out; 3113 3114 ops = bdev->bd_disk->fops->pr_ops; 3115 if (ops && ops->pr_clear) 3116 r = ops->pr_clear(bdev, key); 3117 else 3118 r = -EOPNOTSUPP; 3119 out: 3120 dm_unprepare_ioctl(md, srcu_idx); 3121 return r; 3122 } 3123 3124 static const struct pr_ops dm_pr_ops = { 3125 .pr_register = dm_pr_register, 3126 .pr_reserve = dm_pr_reserve, 3127 .pr_release = dm_pr_release, 3128 .pr_preempt = dm_pr_preempt, 3129 .pr_clear = dm_pr_clear, 3130 }; 3131 3132 static const struct block_device_operations dm_blk_dops = { 3133 .submit_bio = dm_submit_bio, 3134 .poll_bio = dm_poll_bio, 3135 .open = dm_blk_open, 3136 .release = dm_blk_close, 3137 .ioctl = dm_blk_ioctl, 3138 .getgeo = 
dm_blk_getgeo, 3139 .report_zones = dm_blk_report_zones, 3140 .pr_ops = &dm_pr_ops, 3141 .owner = THIS_MODULE 3142 }; 3143 3144 static const struct block_device_operations dm_rq_blk_dops = { 3145 .open = dm_blk_open, 3146 .release = dm_blk_close, 3147 .ioctl = dm_blk_ioctl, 3148 .getgeo = dm_blk_getgeo, 3149 .pr_ops = &dm_pr_ops, 3150 .owner = THIS_MODULE 3151 }; 3152 3153 static const struct dax_operations dm_dax_ops = { 3154 .direct_access = dm_dax_direct_access, 3155 .zero_page_range = dm_dax_zero_page_range, 3156 }; 3157 3158 /* 3159 * module hooks 3160 */ 3161 module_init(dm_init); 3162 module_exit(dm_exit); 3163 3164 module_param(major, uint, 0); 3165 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3166 3167 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3168 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3169 3170 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3171 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3172 3173 module_param(swap_bios, int, S_IRUGO | S_IWUSR); 3174 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); 3175 3176 MODULE_DESCRIPTION(DM_NAME " driver"); 3177 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3178 MODULE_LICENSE("GPL"); 3179
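/*
 * Tuning sketch (illustrative, not an API guarantee): the writable
 * parameters above are typically adjustable at runtime through sysfs,
 * assuming the core is built as the usual dm_mod module, e.g.
 *
 *	echo 32 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 *	echo 64 > /sys/module/dm_mod/parameters/swap_bios
 *
 * 'major' has permission 0, so it is only settable at load time (module
 * option or dm_mod.major=N on the kernel command line).
 */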