/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
 * dm_io into one list, and reuse bio->bi_private as the list head. Before
 * ending this fs bio, we will recover its ->bi_private.
 */
#define REQ_DM_POLL_LIST	REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
	bool is_abnormal_io:1;
	bool submit_as_polled:1;
};

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *ti;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (map->num_targets != 1)
		return r;

	ti = dm_table_get_target(map, 0);
	if (!ti->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = ti->type->prepare_ioctl(ti, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static bool bio_is_flush_with_data(struct bio *bio)
{
	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}

static void dm_io_acct(struct dm_io *io, bool end)
{
	struct dm_stats_aux *stats_aux = &io->stats_aux;
	unsigned long start_time = io->start_time;
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned int sectors;

	/*
	 * If REQ_PREFLUSH set, don't account payload, it will be
	 * submitted (and accounted) after this flush completes.
	 */
	if (bio_is_flush_with_data(bio))
		sectors = 0;
	else if (likely(!(dm_io_flagged(io, DM_IO_WAS_SPLIT))))
		sectors = bio_sectors(bio);
	else
		sectors = io->sectors;

	if (!end)
		bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
				   start_time);
	else
		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&md->stats))) {
		sector_t sector;

		if (likely(!dm_io_flagged(io, DM_IO_WAS_SPLIT)))
			sector = bio->bi_iter.bi_sector;
		else
			sector = bio_end_sector(bio) - io->sector_offset;

		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    sector, sectors,
				    end, start_time, stats_aux);
	}
}

static void __dm_start_io_acct(struct dm_io *io)
{
	dm_io_acct(io, false);
}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
	/*
	 * Ensure IO accounting is only ever started once.
	 */
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		return;

	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
	} else {
		unsigned long flags;
		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
		spin_lock_irqsave(&io->lock, flags);
		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
			spin_unlock_irqrestore(&io->lock, flags);
			return;
		}
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
		spin_unlock_irqrestore(&io->lock, flags);
	}

	__dm_start_io_acct(io);
}

static void dm_end_io_acct(struct dm_io *io)
{
	dm_io_acct(io, true);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
	tio = clone_to_tio(clone);
	tio->flags = 0;
	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = BLK_STS_OK;

	/* one ref is for submission, the other is for completion */
	atomic_set(&io->io_count, 2);
	this_cpu_inc(*md->pending_io);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->lock);
	io->start_time = jiffies;
	io->flags = 0;

	if (static_branch_unlikely(&stats_enabled))
		dm_stats_record_start(&md->stats, &io->stats_aux);

	return io;
}

static void free_io(struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
	struct mapped_device *md = ci->io->md;
	struct dm_target_io *tio;
	struct bio *clone;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
		/* alloc_io() already initialized embedded clone */
		clone = &tio->clone;
	} else {
		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
					&md->mempools->bs);
		if (!clone)
			return NULL;

		/* REQ_DM_POLL_LIST shouldn't be inherited */
		clone->bi_opf &= ~REQ_DM_POLL_LIST;

		tio = clone_to_tio(clone);
		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->len_ptr = len;
	tio->old_sector = 0;

	/* Set default bdev, but target must bio_set_dev() before issuing IO */
	clone->bi_bdev = md->disk->part0;
	if (unlikely(ti->needs_bio_set_dev))
		bio_set_dev(clone, md->disk->part0);

	if (len) {
		clone->bi_iter.bi_size = to_bytes(*len);
		if (bio_integrity(clone))
			bio_integrity_trim(clone);
	}

	return clone;
}

static void free_tio(struct bio *clone)
{
	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
		return;
	bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}
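
/*
 * Typical usage pattern (an illustrative sketch, not code from this file):
 * callers bracket any use of the live table with the two helpers above so
 * that a table swap can wait for readers via dm_sync_table():
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		// look up targets, map IO, etc.
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */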

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
						     int *srcu_idx, blk_opf_t bio_opf)
{
	if (bio_opf & REQ_NOWAIT)
		return dm_get_live_table_fast(md);
	else
		return dm_get_live_table(md, srcu_idx);
}

static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
					 blk_opf_t bio_opf)
{
	if (bio_opf & REQ_NOWAIT)
		dm_put_live_table_fast(md);
	else
		dm_put_live_table(md, srcu_idx);
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static struct table_device *open_table_device(struct mapped_device *md,
					      dev_t dev, fmode_t mode)
{
	struct table_device *td;
	struct block_device *bdev;
	u64 part_off;
	int r;

	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
	if (!td)
		return ERR_PTR(-ENOMEM);
	refcount_set(&td->count, 1);

	bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev)) {
		r = PTR_ERR(bdev);
		goto out_free_td;
	}

	/*
	 * We can be called before the dm disk is added. In that case we can't
	 * register the holder relation here. It will be done once add_disk()
	 * has been called.
	 */
	if (md->disk->slave_dir) {
		r = bd_link_disk_holder(bdev, md->disk);
		if (r)
			goto out_blkdev_put;
	}

	td->dm_dev.mode = mode;
	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
	format_dev_t(td->dm_dev.name, dev);
	list_add(&td->list, &md->table_devices);
	return td;

out_blkdev_put:
	blkdev_put(bdev, mode | FMODE_EXCL);
out_free_td:
	kfree(td);
	return ERR_PTR(r);
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (md->disk->slave_dir)
		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	list_del(&td->list);
	kfree(td);
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = open_table_device(md, dev, mode);
		if (IS_ERR(td)) {
			mutex_unlock(&md->table_devices_lock);
			return PTR_ERR(td);
		}
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count))
		close_table_device(td, md);
	mutex_unlock(&md->table_devices_lock);
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMERR("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{
	struct mapped_device *md = io->md;

	if (first_stage) {
		struct dm_io *next = md->requeue_list;

		md->requeue_list = io;
		io->next = next;
	} else {
		bio_list_add_head(&md->deferred, io->orig_bio);
	}
}

static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
{
	if (first_stage)
		queue_work(md->wq, &md->requeue_work);
	else
		queue_work(md->wq, &md->work);
}

/*
 * Return true if the dm_io's original bio is requeued.
 * io->status is updated with error if requeue disallowed.
 */
static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
{
	struct bio *bio = io->orig_bio;
	bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
	bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
				     (bio->bi_opf & REQ_POLLED));
	struct mapped_device *md = io->md;
	bool requeued = false;

	if (handle_requeue || handle_polled_eagain) {
		unsigned long flags;

		if (bio->bi_opf & REQ_POLLED) {
			/*
			 * Upper layer won't help us poll split bio
			 * (io->orig_bio may only reflect a subset of the
			 * pre-split original) so clear REQ_POLLED.
			 */
			bio_clear_polled(bio);
		}

		/*
		 * Target requested pushing back the I/O or
		 * polled IO hit BLK_STS_AGAIN.
		 */
		spin_lock_irqsave(&md->deferred_lock, flags);
		if ((__noflush_suspending(md) &&
		     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
		    handle_polled_eagain || first_stage) {
			dm_requeue_add_io(io, first_stage);
			requeued = true;
		} else {
			/*
			 * noflush suspend was interrupted or this is
			 * a write to a zoned target.
			 */
			io->status = BLK_STS_IOERR;
		}
		spin_unlock_irqrestore(&md->deferred_lock, flags);
	}

	if (requeued)
		dm_kick_requeue(md, first_stage);

	return requeued;
}

static void __dm_io_complete(struct dm_io *io, bool first_stage)
{
	struct bio *bio = io->orig_bio;
	struct mapped_device *md = io->md;
	blk_status_t io_error;
	bool requeued;

	requeued = dm_handle_requeue(io, first_stage);
	if (requeued && first_stage)
		return;

	io_error = io->status;
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		dm_end_io_acct(io);
	else if (!io_error) {
		/*
		 * Must handle a target that returned DM_MAPIO_SUBMITTED and
		 * then called bio_endio() rather than dm_submit_bio_remap().
		 */
		__dm_start_io_acct(io);
		dm_end_io_acct(io);
	}
	free_io(io);
	smp_wmb();
	this_cpu_dec(*md->pending_io);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/* Return early if the original bio was requeued */
	if (requeued)
		return;

	if (bio_is_flush_with_data(bio)) {
		/*
		 * Preflush done for flush with data, reissue
		 * without REQ_PREFLUSH.
		 */
		bio->bi_opf &= ~REQ_PREFLUSH;
		queue_io(md, bio);
	} else {
		/* done with normal IO or empty flush */
		if (io_error)
			bio->bi_status = io_error;
		bio_endio(bio);
	}
}

static void dm_wq_requeue_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						requeue_work);
	unsigned long flags;
	struct dm_io *io;

	/* reuse deferred lock to simplify dm_handle_requeue */
	spin_lock_irqsave(&md->deferred_lock, flags);
	io = md->requeue_list;
	md->requeue_list = NULL;
	spin_unlock_irqrestore(&md->deferred_lock, flags);

	while (io) {
		struct dm_io *next = io->next;

		dm_io_rewind(io, &md->disk->bio_split);

		io->next = NULL;
		__dm_io_complete(io, false);
		io = next;
	}
}

/*
 * Two staged requeue:
 *
 * 1) io->orig_bio points to the real original bio, and the part mapped to
 *    this io must be requeued, instead of other parts of the original bio.
 *
 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
 */
static void dm_io_complete(struct dm_io *io)
{
	bool first_requeue;

	/*
	 * Only dm_io that has been split needs two stage requeue, otherwise
	 * we may run into long bio clone chain during suspend and OOM could
	 * be triggered.
	 *
	 * Also, a flush-with-data dm_io is never marked DM_IO_WAS_SPLIT, so
	 * it is not handled via the first stage requeue either.
	 */
	if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
		first_requeue = true;
	else
		first_requeue = false;

	__dm_io_complete(io, first_requeue);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{
	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	spin_lock_irqsave(&io->lock, flags);
	if (!(io->status == BLK_STS_DM_REQUEUE &&
	      __noflush_suspending(io->md))) {
		io->status = error;
	}
	spin_unlock_irqrestore(&io->lock, flags);
}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	if (unlikely(error))
		dm_io_set_error(io, error);

	__dm_io_dec_pending(io);
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_target *ti = tio->ti;
	dm_endio_fn endio = ti->type->end_io;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			disable_write_zeroes(md);
	}

	if (static_branch_unlikely(&zoned_enabled) &&
	    unlikely(bdev_is_zoned(bio->bi_bdev)))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			if (static_branch_unlikely(&zoned_enabled)) {
				/*
				 * Requeuing writes to a sequential zone of a zoned
				 * target will break the sequential write pattern:
				 * fail such IO.
				 */
				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
					error = BLK_STS_IOERR;
				else
					error = BLK_STS_DM_REQUEUE;
			} else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMCRIT("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, bio)))
		up(&md->swap_bios_semaphore);

	free_tio(bio);
	dm_io_dec_pending(io, error);
}
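
/*
 * Illustrative sketch (an assumption, not code from this file): a target's
 * ->end_io() hook can ask DM core to retry a clone by returning
 * DM_ENDIO_REQUEUE, which clone_endio() above turns into BLK_STS_DM_REQUEUE
 * (unless the bio is a zoned write):
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  blk_status_t *error)
 *	{
 *		if (*error && example_should_retry(ti))	// hypothetical helper
 *			return DM_ENDIO_REQUEUE;
 *		return DM_ENDIO_DONE;
 *	}
 */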

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   bio_split_to_limits() isn't possible here.
	 */
	if (!ti->max_io_len)
		return len;
	return min_t(sector_t, len,
		     min(queue_max_sectors(ti->table->md->queue),
			 blk_chunk_sectors_left(target_offset, ti->max_io_len)));
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int srcu_idx;
	long ret = 0;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti || !ti->type->dax_recovery_write)
		goto out;

	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <----- bio_sectors ----->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_io *io = tio->io;
	unsigned bio_sectors = bio_sectors(bio);

	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bio_sectors > *tio->len_ptr);
	BUG_ON(n_sectors > bio_sectors);

	*tio->len_ptr -= bio_sectors - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;

	/*
	 * __split_and_process_bio() may have already saved mapped part
	 * for accounting but it is being reduced so update accordingly.
	 */
	dm_io_set_flag(io, DM_IO_WAS_SPLIT);
	io->sectors = n_sectors;
	io->sector_offset = bio_sectors(io->orig_bio);
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
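
/*
 * Illustrative sketch (an assumption, not code from this file): a target's
 * ->map() method may truncate the clone it was handed so that only the first
 * chunk is processed now; DM core then resubmits the remainder as a new bio:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned int max = 8;	// hypothetical per-IO limit, in sectors
 *
 *		if (bio_sectors(bio) > max)
 *			dm_accept_partial_bio(bio, max);
 *		bio_set_dev(bio, example_dev(ti)->bdev);	// hypothetical helper
 *		return DM_MAPIO_REMAPPED;
 *	}
 */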

/*
 * @clone: clone bio that DM core passed to target's .map function
 * @tgt_clone: clone of @clone bio that target needs submitted
 *
 * Targets should use this interface to submit bios they take
 * ownership of when returning DM_MAPIO_SUBMITTED.
 *
 * Target should also enable ti->accounts_remapped_io
 */
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_io *io = tio->io;

	/* establish bio that will get submitted */
	if (!tgt_clone)
		tgt_clone = clone;

	/*
	 * Account io->orig_bio to DM dev on behalf of target
	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
	 */
	dm_start_io_acct(io, clone);

	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
			      tio->old_sector);
	submit_bio_noacct(tgt_clone);
}
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);

static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

static void __map_bio(struct bio *clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_target *ti = tio->ti;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	int r;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.
	 */
	tio->old_sector = clone->bi_iter.bi_sector;

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, clone))) {
		int latch = get_swap_bios();
		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	if (static_branch_unlikely(&zoned_enabled)) {
		/*
		 * Check if the IO needs a special mapping due to zone append
		 * emulation on zoned target. In this case, dm_zone_map_bio()
		 * calls the target map operation.
		 */
		if (unlikely(dm_emulate_zone_append(md)))
			r = dm_zone_map_bio(tio);
		else
			r = ti->type->map(ti, clone);
	} else
		r = ti->type->map(ti, clone);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* target has assumed ownership of this io */
		if (!ti->accounts_remapped_io)
			dm_start_io_acct(io, clone);
		break;
	case DM_MAPIO_REMAPPED:
		dm_submit_bio_remap(clone, NULL);
		break;
	case DM_MAPIO_KILL:
	case DM_MAPIO_REQUEUE:
		if (static_branch_unlikely(&swap_bios_enabled) &&
		    unlikely(swap_bios_limit(ti, clone)))
			up(&md->swap_bios_semaphore);
		free_tio(clone);
		if (r == DM_MAPIO_KILL)
			dm_io_dec_pending(io, BLK_STS_IOERR);
		else
			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMCRIT("unimplemented target map return value: %d", r);
		BUG();
	}
}

static void setup_split_accounting(struct clone_info *ci, unsigned len)
{
	struct dm_io *io = ci->io;

	if (ci->sector_count > len) {
		/*
		 * Split needed, save the mapped part for accounting.
		 * NOTE: dm_accept_partial_bio() will update accordingly.
		 */
		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
		io->sectors = len;
		io->sector_offset = bio_sectors(ci->bio);
	}
}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
{
	struct bio *bio;
	int try;

	for (try = 0; try < 2; try++) {
		int bio_nr;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			bio = alloc_tio(ci, ti, bio_nr, NULL,
					try ? GFP_NOIO : GFP_NOWAIT);
			if (!bio)
				break;

			bio_list_add(blist, bio);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist)))
			free_tio(bio);
	}
}

static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				 unsigned int num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *clone;
	unsigned int ret = 0;

	switch (num_bios) {
	case 0:
		break;
	case 1:
		if (len)
			setup_split_accounting(ci, *len);
		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
		__map_bio(clone);
		ret = 1;
		break;
	default:
		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
		alloc_multiple_bios(&blist, ci, ti, num_bios);
		while ((clone = bio_list_pop(&blist))) {
			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
			__map_bio(clone);
			ret += 1;
		}
		break;
	}

	return ret;
}

static void __send_empty_flush(struct clone_info *ci)
{
	struct dm_table *t = ci->map;
	struct bio flush_bio;

	/*
	 * Use an on-stack bio for this, it's safe since we don't
	 * need to reference it after submit. It's just used as
	 * the basis for the clone(s).
	 */
	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);

	ci->bio = &flush_bio;
	ci->sector_count = 0;
	ci->io->tio.clone.bi_iter.bi_size = 0;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		unsigned int bios;
		struct dm_target *ti = dm_table_get_target(t, i);

		atomic_add(ti->num_flush_bios, &ci->io->io_count);
		bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
		atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
	}

	/*
	 * alloc_io() takes one extra reference for submission, so the
	 * reference won't reach 0 without the following subtraction
	 */
	atomic_sub(1, &ci->io->io_count);

	bio_uninit(ci->bio);
}

static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
					unsigned num_bios)
{
	unsigned len;
	unsigned int bios;

	len = min_t(sector_t, ci->sector_count,
		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));

	atomic_add(num_bios, &ci->io->io_count);
	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
	/*
	 * alloc_io() takes one extra reference for submission, so the
	 * reference won't reach 0 without the following (+1) subtraction
	 */
	atomic_sub(num_bios - bios + 1, &ci->io->io_count);

	ci->sector += len;
	ci->sector_count -= len;
}

static bool is_abnormal_io(struct bio *bio)
{
	enum req_op op = bio_op(bio);

	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_SECURE_ERASE:
		case REQ_OP_WRITE_ZEROES:
			return true;
		default:
			break;
		}
	}

	return false;
}

static blk_status_t __process_abnormal_io(struct clone_info *ci,
					  struct dm_target *ti)
{
	unsigned num_bios = 0;

	switch (bio_op(ci->bio)) {
	case REQ_OP_DISCARD:
		num_bios = ti->num_discard_bios;
		break;
	case REQ_OP_SECURE_ERASE:
		num_bios = ti->num_secure_erase_bios;
		break;
	case REQ_OP_WRITE_ZEROES:
		num_bios = ti->num_write_zeroes_bios;
		break;
	default:
		break;
	}

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (unlikely(!num_bios))
		return BLK_STS_NOTSUPP;

	__send_changing_extent_only(ci, ti, num_bios);
	return BLK_STS_OK;
}

/*
 * Reuse ->bi_private as dm_io list head for storing all dm_io instances
 * associated with this bio, and this bio's bi_private needs to be
 * stored in dm_io->data before the reuse.
 *
 * bio->bi_private is owned by fs or upper layer, so block layer won't
 * touch it after splitting. Meantime it won't be changed by anyone after
 * bio is submitted. So this reuse is safe.
 */
static inline struct dm_io **dm_poll_list_head(struct bio *bio)
{
	return (struct dm_io **)&bio->bi_private;
}

static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{
	struct dm_io **head = dm_poll_list_head(bio);

	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/*
		 * Save .bi_private into dm_io, so that we can reuse
		 * .bi_private as dm_io list head for storing dm_io list
		 */
		io->data = bio->bi_private;

		/* tell block layer to poll for completion */
		bio->bi_cookie = ~BLK_QC_T_NONE;

		io->next = NULL;
	} else {
		/*
		 * bio recursed due to split, reuse original poll list,
		 * and save bio->bi_private too.
		 */
		io->data = (*head)->data;
		io->next = *head;
	}

	*head = io;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static blk_status_t __split_and_process_bio(struct clone_info *ci)
{
	struct bio *clone;
	struct dm_target *ti;
	unsigned len;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (unlikely(!ti))
		return BLK_STS_IOERR;

	if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
	    unlikely(!dm_target_supports_nowait(ti->type)))
		return BLK_STS_NOTSUPP;

	if (unlikely(ci->is_abnormal_io))
		return __process_abnormal_io(ci, ti);

	/*
	 * Only support bio polling for normal IO, and the target io is
	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
	 */
	ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);

	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
	setup_split_accounting(ci, len);
	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
	__map_bio(clone);

	ci->sector += len;
	ci->sector_count -= len;

	return BLK_STS_OK;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio, bool is_abnormal)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->bio = bio;
	ci->is_abnormal_io = is_abnormal;
	ci->submit_as_polled = false;
	ci->sector = bio->bi_iter.bi_sector;
	ci->sector_count = bio_sectors(bio);

	/* Shouldn't happen but sector_count was being set to 0 so... */
	if (static_branch_unlikely(&zoned_enabled) &&
	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
		ci->sector_count = 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void dm_split_and_process_bio(struct mapped_device *md,
				     struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	struct dm_io *io;
	blk_status_t error = BLK_STS_OK;
	bool is_abnormal;

	is_abnormal = is_abnormal_io(bio);
	if (unlikely(is_abnormal)) {
		/*
		 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
		 * otherwise associated queue_limits won't be imposed.
		 */
		bio = bio_split_to_limits(bio);
		if (!bio)
			return;
	}

	init_clone_info(&ci, md, map, bio, is_abnormal);
	io = ci.io;

	if (bio->bi_opf & REQ_PREFLUSH) {
		__send_empty_flush(&ci);
		/* dm_io_complete submits any data associated with flush */
		goto out;
	}

	error = __split_and_process_bio(&ci);
	if (error || !ci.sector_count)
		goto out;
	/*
	 * Remainder must be passed to submit_bio_noacct() so it gets handled
	 * *after* bios already submitted have been completely processed.
	 */
	bio_trim(bio, io->sectors, ci.sector_count);
	trace_block_split(bio, bio->bi_iter.bi_sector);
	bio_inc_remaining(bio);
	submit_bio_noacct(bio);
out:
	/*
	 * Drop the extra reference count for non-POLLED bio, and hold one
	 * reference for POLLED bio, which will be released in dm_poll_bio
	 *
	 * Add every dm_io instance into the dm_io list head which is stored
	 * in bio->bi_private, so that dm_poll_bio can poll them all.
	 */
	if (error || !ci.submit_as_polled) {
		/*
		 * In case of submission failure, the extra reference for
		 * submitting io isn't consumed yet
		 */
		if (error)
			atomic_dec(&io->io_count);
		dm_io_dec_pending(io, error);
	} else
		dm_queue_poll_io(bio, io);
}

static void dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	blk_opf_t bio_opf = bio->bi_opf;

	map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);

	/* If suspended, or map not yet available, queue this IO for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
	    unlikely(!map)) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);
		else
			queue_io(md, bio);
		goto out;
	}

	dm_split_and_process_bio(md, map, bio);
out:
	dm_put_live_table_bio(md, srcu_idx, bio_opf);
}

static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
			  unsigned int flags)
{
	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));

	/* don't poll if the mapped io is done */
	if (atomic_read(&io->io_count) > 1)
		bio_poll(&io->tio.clone, iob, flags);

	/* bio_poll holds the last reference */
	return atomic_read(&io->io_count) == 1;
}

static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
		       unsigned int flags)
{
	struct dm_io **head = dm_poll_list_head(bio);
	struct dm_io *list = *head;
	struct dm_io *tmp = NULL;
	struct dm_io *curr, *next;

	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
		return 0;

	WARN_ON_ONCE(!list);

	/*
	 * Restore .bi_private before possibly completing dm_io.
	 *
	 * bio_poll() is only possible once @bio has been completely
	 * submitted via submit_bio_noacct()'s depth-first submission.
	 * So there is no dm_queue_poll_io() race associated with
	 * clearing REQ_DM_POLL_LIST here.
	 */
	bio->bi_opf &= ~REQ_DM_POLL_LIST;
	bio->bi_private = list->data;

	for (curr = list, next = curr->next; curr; curr = next, next =
			curr ? curr->next : NULL) {
		if (dm_poll_dm_io(curr, iob, flags)) {
			/*
			 * clone_endio() has already occurred, so no
			 * error handling is needed here.
			 */
			__dm_io_dec_pending(curr);
		} else {
			curr->next = tmp;
			tmp = curr;
		}
	}

	/* Not done? */
	if (tmp) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/* Reset bio->bi_private to dm_io list head */
		*head = tmp;
		return 0;
	}
	return 1;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
	dm_destroy_crypto_profile(q->crypto_profile);
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	dm_free_md_mempools(md->mempools);

	if (md->dax_dev) {
		dax_remove_host(md->disk);
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	dm_cleanup_zoned_dev(md);
	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		if (dm_get_md_type(md) != DM_TYPE_NONE) {
			struct table_device *td;

			dm_sysfs_exit(md);
			list_for_each_entry(td, &md->table_devices, list) {
				bd_unlink_disk_holder(td->dm_dev.bdev,
						      md->disk);
			}

			/*
			 * Hold lock to make sure del_gendisk() won't run
			 * concurrently with open/close_table_device().
			 */
			mutex_lock(&md->table_devices_lock);
			del_gendisk(md->disk);
			mutex_unlock(&md->table_devices_lock);
		}
		dm_queue_destroy_crypto_profile(md->queue);
		put_disk(md->disk);
	}

	if (md->pending_io) {
		free_percpu(md->pending_io);
		md->pending_io = NULL;
	}

	cleanup_srcu_struct(&md->io_barrier);

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);
	mutex_destroy(&md->swap_bios_lock);

	dm_mq_cleanup_mapped_device(md);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMERR("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	/*
	 * default to bio-based until DM table is loaded and md->type
	 * established. If request-based table is loaded: blk-mq will
	 * override accordingly.
	 */
	md->disk = blk_alloc_disk(md->numa_node_id);
	if (!md->disk)
		goto bad;
	md->queue = md->disk->queue;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->requeue_list = NULL;
	md->swap_bios = get_swap_bios();
	sema_init(&md->swap_bios_semaphore, md->swap_bios);
	mutex_init(&md->swap_bios_lock);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->minors = 1;
	md->disk->flags |= GENHD_FL_NO_PART;
	md->disk->fops = &dm_blk_dops;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_FS_DAX)) {
		md->dax_dev = alloc_dax(md, &dm_dax_ops);
		if (IS_ERR(md->dax_dev)) {
			md->dax_dev = NULL;
			goto bad;
		}
		set_dax_nocache(md->dax_dev);
		set_dax_nomc(md->dax_dev);
		if (dax_add_host(md->dax_dev, md->disk))
			goto bad;
	}

	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
	if (!md->wq)
		goto bad;

	md->pending_io = alloc_percpu(unsigned long);
	if (!md->pending_io)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	WARN_ON_ONCE(!list_empty(&md->table_devices));
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
2171 */ 2172 if (size != dm_get_size(md)) 2173 memset(&md->geometry, 0, sizeof(md->geometry)); 2174 2175 if (!get_capacity(md->disk)) 2176 set_capacity(md->disk, size); 2177 else 2178 set_capacity_and_notify(md->disk, size); 2179 2180 dm_table_event_callback(t, event_callback, md); 2181 2182 if (dm_table_request_based(t)) { 2183 /* 2184 * Leverage the fact that request-based DM targets are 2185 * immutable singletons - used to optimize dm_mq_queue_rq. 2186 */ 2187 md->immutable_target = dm_table_get_immutable_target(t); 2188 2189 /* 2190 * There is no need to reload with request-based dm because the 2191 * size of front_pad doesn't change. 2192 * 2193 * Note for future: If you are to reload bioset, prep-ed 2194 * requests in the queue may refer to bio from the old bioset, 2195 * so you must walk through the queue to unprep. 2196 */ 2197 if (!md->mempools) { 2198 md->mempools = t->mempools; 2199 t->mempools = NULL; 2200 } 2201 } else { 2202 /* 2203 * The md may already have mempools that need changing. 2204 * If so, reload bioset because front_pad may have changed 2205 * because a different table was loaded. 2206 */ 2207 dm_free_md_mempools(md->mempools); 2208 md->mempools = t->mempools; 2209 t->mempools = NULL; 2210 } 2211 2212 ret = dm_table_set_restrictions(t, md->queue, limits); 2213 if (ret) { 2214 old_map = ERR_PTR(ret); 2215 goto out; 2216 } 2217 2218 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2219 rcu_assign_pointer(md->map, (void *)t); 2220 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2221 2222 if (old_map) 2223 dm_sync_table(md); 2224 out: 2225 return old_map; 2226 } 2227 2228 /* 2229 * Returns unbound table for the caller to free. 2230 */ 2231 static struct dm_table *__unbind(struct mapped_device *md) 2232 { 2233 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2234 2235 if (!map) 2236 return NULL; 2237 2238 dm_table_event_callback(map, NULL, NULL); 2239 RCU_INIT_POINTER(md->map, NULL); 2240 dm_sync_table(md); 2241 2242 return map; 2243 } 2244 2245 /* 2246 * Constructor for a new device. 2247 */ 2248 int dm_create(int minor, struct mapped_device **result) 2249 { 2250 struct mapped_device *md; 2251 2252 md = alloc_dev(minor); 2253 if (!md) 2254 return -ENXIO; 2255 2256 dm_ima_reset_data(md); 2257 2258 *result = md; 2259 return 0; 2260 } 2261 2262 /* 2263 * Functions to manage md->type. 2264 * All are required to hold md->type_lock. 2265 */ 2266 void dm_lock_md_type(struct mapped_device *md) 2267 { 2268 mutex_lock(&md->type_lock); 2269 } 2270 2271 void dm_unlock_md_type(struct mapped_device *md) 2272 { 2273 mutex_unlock(&md->type_lock); 2274 } 2275 2276 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2277 { 2278 BUG_ON(!mutex_is_locked(&md->type_lock)); 2279 md->type = type; 2280 } 2281 2282 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2283 { 2284 return md->type; 2285 } 2286 2287 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2288 { 2289 return md->immutable_target_type; 2290 } 2291 2292 /* 2293 * The queue_limits are only valid as long as you have a reference 2294 * count on 'md'. 
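 * Hold a reference with dm_get()/dm_hold() and drop it with dm_put(); the
 * BUG_ON() below asserts that at least one holder exists.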
2295 */ 2296 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2297 { 2298 BUG_ON(!atomic_read(&md->holders)); 2299 return &md->queue->limits; 2300 } 2301 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2302 2303 /* 2304 * Setup the DM device's queue based on md's type 2305 */ 2306 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2307 { 2308 enum dm_queue_mode type = dm_table_get_type(t); 2309 struct queue_limits limits; 2310 struct table_device *td; 2311 int r; 2312 2313 switch (type) { 2314 case DM_TYPE_REQUEST_BASED: 2315 md->disk->fops = &dm_rq_blk_dops; 2316 r = dm_mq_init_request_queue(md, t); 2317 if (r) { 2318 DMERR("Cannot initialize queue for request-based dm mapped device"); 2319 return r; 2320 } 2321 break; 2322 case DM_TYPE_BIO_BASED: 2323 case DM_TYPE_DAX_BIO_BASED: 2324 break; 2325 case DM_TYPE_NONE: 2326 WARN_ON_ONCE(true); 2327 break; 2328 } 2329 2330 r = dm_calculate_queue_limits(t, &limits); 2331 if (r) { 2332 DMERR("Cannot calculate initial queue limits"); 2333 return r; 2334 } 2335 r = dm_table_set_restrictions(t, md->queue, &limits); 2336 if (r) 2337 return r; 2338 2339 /* 2340 * Hold lock to make sure add_disk() and del_gendisk() won't concurrent 2341 * with open_table_device() and close_table_device(). 2342 */ 2343 mutex_lock(&md->table_devices_lock); 2344 r = add_disk(md->disk); 2345 mutex_unlock(&md->table_devices_lock); 2346 if (r) 2347 return r; 2348 2349 /* 2350 * Register the holder relationship for devices added before the disk 2351 * was live. 2352 */ 2353 list_for_each_entry(td, &md->table_devices, list) { 2354 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk); 2355 if (r) 2356 goto out_undo_holders; 2357 } 2358 2359 r = dm_sysfs_init(md); 2360 if (r) 2361 goto out_undo_holders; 2362 2363 md->type = type; 2364 return 0; 2365 2366 out_undo_holders: 2367 list_for_each_entry_continue_reverse(td, &md->table_devices, list) 2368 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); 2369 mutex_lock(&md->table_devices_lock); 2370 del_gendisk(md->disk); 2371 mutex_unlock(&md->table_devices_lock); 2372 return r; 2373 } 2374 2375 struct mapped_device *dm_get_md(dev_t dev) 2376 { 2377 struct mapped_device *md; 2378 unsigned minor = MINOR(dev); 2379 2380 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2381 return NULL; 2382 2383 spin_lock(&_minor_lock); 2384 2385 md = idr_find(&_minor_idr, minor); 2386 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2387 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2388 md = NULL; 2389 goto out; 2390 } 2391 dm_get(md); 2392 out: 2393 spin_unlock(&_minor_lock); 2394 2395 return md; 2396 } 2397 EXPORT_SYMBOL_GPL(dm_get_md); 2398 2399 void *dm_get_mdptr(struct mapped_device *md) 2400 { 2401 return md->interface_ptr; 2402 } 2403 2404 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2405 { 2406 md->interface_ptr = ptr; 2407 } 2408 2409 void dm_get(struct mapped_device *md) 2410 { 2411 atomic_inc(&md->holders); 2412 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2413 } 2414 2415 int dm_hold(struct mapped_device *md) 2416 { 2417 spin_lock(&_minor_lock); 2418 if (test_bit(DMF_FREEING, &md->flags)) { 2419 spin_unlock(&_minor_lock); 2420 return -EBUSY; 2421 } 2422 dm_get(md); 2423 spin_unlock(&_minor_lock); 2424 return 0; 2425 } 2426 EXPORT_SYMBOL_GPL(dm_hold); 2427 2428 const char *dm_device_name(struct mapped_device *md) 2429 { 2430 return md->name; 2431 } 2432 EXPORT_SYMBOL_GPL(dm_device_name); 2433 2434 static void __dm_destroy(struct mapped_device *md, bool wait) 
2435 { 2436 struct dm_table *map; 2437 int srcu_idx; 2438 2439 might_sleep(); 2440 2441 spin_lock(&_minor_lock); 2442 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2443 set_bit(DMF_FREEING, &md->flags); 2444 spin_unlock(&_minor_lock); 2445 2446 blk_mark_disk_dead(md->disk); 2447 2448 /* 2449 * Take suspend_lock so that presuspend and postsuspend methods 2450 * do not race with internal suspend. 2451 */ 2452 mutex_lock(&md->suspend_lock); 2453 map = dm_get_live_table(md, &srcu_idx); 2454 if (!dm_suspended_md(md)) { 2455 dm_table_presuspend_targets(map); 2456 set_bit(DMF_SUSPENDED, &md->flags); 2457 set_bit(DMF_POST_SUSPENDING, &md->flags); 2458 dm_table_postsuspend_targets(map); 2459 } 2460 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2461 dm_put_live_table(md, srcu_idx); 2462 mutex_unlock(&md->suspend_lock); 2463 2464 /* 2465 * Rare, but there may be I/O requests still going to complete, 2466 * for example. Wait for all references to disappear. 2467 * No one should increment the reference count of the mapped_device, 2468 * after the mapped_device state becomes DMF_FREEING. 2469 */ 2470 if (wait) 2471 while (atomic_read(&md->holders)) 2472 msleep(1); 2473 else if (atomic_read(&md->holders)) 2474 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2475 dm_device_name(md), atomic_read(&md->holders)); 2476 2477 dm_table_destroy(__unbind(md)); 2478 free_dev(md); 2479 } 2480 2481 void dm_destroy(struct mapped_device *md) 2482 { 2483 __dm_destroy(md, true); 2484 } 2485 2486 void dm_destroy_immediate(struct mapped_device *md) 2487 { 2488 __dm_destroy(md, false); 2489 } 2490 2491 void dm_put(struct mapped_device *md) 2492 { 2493 atomic_dec(&md->holders); 2494 } 2495 EXPORT_SYMBOL_GPL(dm_put); 2496 2497 static bool dm_in_flight_bios(struct mapped_device *md) 2498 { 2499 int cpu; 2500 unsigned long sum = 0; 2501 2502 for_each_possible_cpu(cpu) 2503 sum += *per_cpu_ptr(md->pending_io, cpu); 2504 2505 return sum != 0; 2506 } 2507 2508 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 2509 { 2510 int r = 0; 2511 DEFINE_WAIT(wait); 2512 2513 while (true) { 2514 prepare_to_wait(&md->wait, &wait, task_state); 2515 2516 if (!dm_in_flight_bios(md)) 2517 break; 2518 2519 if (signal_pending_state(task_state, current)) { 2520 r = -EINTR; 2521 break; 2522 } 2523 2524 io_schedule(); 2525 } 2526 finish_wait(&md->wait, &wait); 2527 2528 smp_rmb(); 2529 2530 return r; 2531 } 2532 2533 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 2534 { 2535 int r = 0; 2536 2537 if (!queue_is_mq(md->queue)) 2538 return dm_wait_for_bios_completion(md, task_state); 2539 2540 while (true) { 2541 if (!blk_mq_queue_inflight(md->queue)) 2542 break; 2543 2544 if (signal_pending_state(task_state, current)) { 2545 r = -EINTR; 2546 break; 2547 } 2548 2549 msleep(5); 2550 } 2551 2552 return r; 2553 } 2554 2555 /* 2556 * Process the deferred bios 2557 */ 2558 static void dm_wq_work(struct work_struct *work) 2559 { 2560 struct mapped_device *md = container_of(work, struct mapped_device, work); 2561 struct bio *bio; 2562 2563 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2564 spin_lock_irq(&md->deferred_lock); 2565 bio = bio_list_pop(&md->deferred); 2566 spin_unlock_irq(&md->deferred_lock); 2567 2568 if (!bio) 2569 break; 2570 2571 submit_bio_noacct(bio); 2572 } 2573 } 2574 2575 static void dm_queue_flush(struct mapped_device *md) 2576 { 2577 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, 
&md->flags); 2578 smp_mb__after_atomic(); 2579 queue_work(md->wq, &md->work); 2580 } 2581 2582 /* 2583 * Swap in a new table, returning the old one for the caller to destroy. 2584 */ 2585 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2586 { 2587 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2588 struct queue_limits limits; 2589 int r; 2590 2591 mutex_lock(&md->suspend_lock); 2592 2593 /* device must be suspended */ 2594 if (!dm_suspended_md(md)) 2595 goto out; 2596 2597 /* 2598 * If the new table has no data devices, retain the existing limits. 2599 * This helps multipath with queue_if_no_path if all paths disappear, 2600 * then new I/O is queued based on these limits, and then some paths 2601 * reappear. 2602 */ 2603 if (dm_table_has_no_data_devices(table)) { 2604 live_map = dm_get_live_table_fast(md); 2605 if (live_map) 2606 limits = md->queue->limits; 2607 dm_put_live_table_fast(md); 2608 } 2609 2610 if (!live_map) { 2611 r = dm_calculate_queue_limits(table, &limits); 2612 if (r) { 2613 map = ERR_PTR(r); 2614 goto out; 2615 } 2616 } 2617 2618 map = __bind(md, table, &limits); 2619 dm_issue_global_event(); 2620 2621 out: 2622 mutex_unlock(&md->suspend_lock); 2623 return map; 2624 } 2625 2626 /* 2627 * Functions to lock and unlock any filesystem running on the 2628 * device. 2629 */ 2630 static int lock_fs(struct mapped_device *md) 2631 { 2632 int r; 2633 2634 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2635 2636 r = freeze_bdev(md->disk->part0); 2637 if (!r) 2638 set_bit(DMF_FROZEN, &md->flags); 2639 return r; 2640 } 2641 2642 static void unlock_fs(struct mapped_device *md) 2643 { 2644 if (!test_bit(DMF_FROZEN, &md->flags)) 2645 return; 2646 thaw_bdev(md->disk->part0); 2647 clear_bit(DMF_FROZEN, &md->flags); 2648 } 2649 2650 /* 2651 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2652 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2653 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2654 * 2655 * If __dm_suspend returns 0, the device is completely quiescent 2656 * now. There is no request-processing activity. All new requests 2657 * are being added to md->deferred list. 2658 */ 2659 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2660 unsigned suspend_flags, unsigned int task_state, 2661 int dmf_suspended_flag) 2662 { 2663 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2664 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2665 int r; 2666 2667 lockdep_assert_held(&md->suspend_lock); 2668 2669 /* 2670 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2671 * This flag is cleared before dm_suspend returns. 2672 */ 2673 if (noflush) 2674 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2675 else 2676 DMDEBUG("%s: suspending with flush", dm_device_name(md)); 2677 2678 /* 2679 * This gets reverted if there's an error later and the targets 2680 * provide the .presuspend_undo hook. 2681 */ 2682 dm_table_presuspend_targets(map); 2683 2684 /* 2685 * Flush I/O to the device. 2686 * Any I/O submitted after lock_fs() may not be flushed. 2687 * noflush takes precedence over do_lockfs. 2688 * (lock_fs() flushes I/Os and waits for them to complete.) 2689 */ 2690 if (!noflush && do_lockfs) { 2691 r = lock_fs(md); 2692 if (r) { 2693 dm_table_presuspend_undo_targets(map); 2694 return r; 2695 } 2696 } 2697 2698 /* 2699 * Here we must make sure that no processes are submitting requests 2700 * to target drivers i.e. 
no one may be executing 2701 * dm_split_and_process_bio from dm_submit_bio. 2702 * 2703 * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 2704 * we take the write lock. To prevent any process from reentering 2705 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread 2706 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 2707 * flush_workqueue(md->wq). 2708 */ 2709 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2710 if (map) 2711 synchronize_srcu(&md->io_barrier); 2712 2713 /* 2714 * Stop md->queue before flushing md->wq in case request-based 2715 * dm defers requests to md->wq from md->queue. 2716 */ 2717 if (dm_request_based(md)) 2718 dm_stop_queue(md->queue); 2719 2720 flush_workqueue(md->wq); 2721 2722 /* 2723 * At this point no more requests are entering target request routines. 2724 * We call dm_wait_for_completion to wait for all existing requests 2725 * to finish. 2726 */ 2727 r = dm_wait_for_completion(md, task_state); 2728 if (!r) 2729 set_bit(dmf_suspended_flag, &md->flags); 2730 2731 if (noflush) 2732 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2733 if (map) 2734 synchronize_srcu(&md->io_barrier); 2735 2736 /* were we interrupted ? */ 2737 if (r < 0) { 2738 dm_queue_flush(md); 2739 2740 if (dm_request_based(md)) 2741 dm_start_queue(md->queue); 2742 2743 unlock_fs(md); 2744 dm_table_presuspend_undo_targets(map); 2745 /* pushback list is already flushed, so skip flush */ 2746 } 2747 2748 return r; 2749 } 2750 2751 /* 2752 * We need to be able to change a mapping table under a mounted 2753 * filesystem. For example we might want to move some data in 2754 * the background. Before the table can be swapped with 2755 * dm_bind_table, dm_suspend must be called to flush any in 2756 * flight bios and ensure that any further io gets deferred. 2757 */ 2758 /* 2759 * Suspend mechanism in request-based dm. 2760 * 2761 * 1. Flush all I/Os by lock_fs() if needed. 2762 * 2. Stop dispatching any I/O by stopping the request_queue. 2763 * 3. Wait for all in-flight I/Os to be completed or requeued. 2764 * 2765 * To abort suspend, start the request_queue. 2766 */ 2767 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2768 { 2769 struct dm_table *map = NULL; 2770 int r = 0; 2771 2772 retry: 2773 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2774 2775 if (dm_suspended_md(md)) { 2776 r = -EINVAL; 2777 goto out_unlock; 2778 } 2779 2780 if (dm_suspended_internally_md(md)) { 2781 /* already internally suspended, wait for internal resume */ 2782 mutex_unlock(&md->suspend_lock); 2783 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2784 if (r) 2785 return r; 2786 goto retry; 2787 } 2788 2789 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2790 2791 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2792 if (r) 2793 goto out_unlock; 2794 2795 set_bit(DMF_POST_SUSPENDING, &md->flags); 2796 dm_table_postsuspend_targets(map); 2797 clear_bit(DMF_POST_SUSPENDING, &md->flags); 2798 2799 out_unlock: 2800 mutex_unlock(&md->suspend_lock); 2801 return r; 2802 } 2803 2804 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 2805 { 2806 if (map) { 2807 int r = dm_table_resume_targets(map); 2808 if (r) 2809 return r; 2810 } 2811 2812 dm_queue_flush(md); 2813 2814 /* 2815 * Flushing deferred I/Os must be done after targets are resumed 2816 * so that mapping of targets can work correctly. 
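 * (Bio-based dm resubmits them from dm_wq_work() once dm_queue_flush()
 * clears DMF_BLOCK_IO_FOR_SUSPEND and kicks md->wq.)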
2817 * Request-based dm is queueing the deferred I/Os in its request_queue. 2818 */ 2819 if (dm_request_based(md)) 2820 dm_start_queue(md->queue); 2821 2822 unlock_fs(md); 2823 2824 return 0; 2825 } 2826 2827 int dm_resume(struct mapped_device *md) 2828 { 2829 int r; 2830 struct dm_table *map = NULL; 2831 2832 retry: 2833 r = -EINVAL; 2834 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2835 2836 if (!dm_suspended_md(md)) 2837 goto out; 2838 2839 if (dm_suspended_internally_md(md)) { 2840 /* already internally suspended, wait for internal resume */ 2841 mutex_unlock(&md->suspend_lock); 2842 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2843 if (r) 2844 return r; 2845 goto retry; 2846 } 2847 2848 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2849 if (!map || !dm_table_get_size(map)) 2850 goto out; 2851 2852 r = __dm_resume(md, map); 2853 if (r) 2854 goto out; 2855 2856 clear_bit(DMF_SUSPENDED, &md->flags); 2857 out: 2858 mutex_unlock(&md->suspend_lock); 2859 2860 return r; 2861 } 2862 2863 /* 2864 * Internal suspend/resume works like userspace-driven suspend. It waits 2865 * until all bios finish and prevents issuing new bios to the target drivers. 2866 * It may be used only from the kernel. 2867 */ 2868 2869 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2870 { 2871 struct dm_table *map = NULL; 2872 2873 lockdep_assert_held(&md->suspend_lock); 2874 2875 if (md->internal_suspend_count++) 2876 return; /* nested internal suspend */ 2877 2878 if (dm_suspended_md(md)) { 2879 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2880 return; /* nest suspend */ 2881 } 2882 2883 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2884 2885 /* 2886 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2887 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2888 * would require changing .presuspend to return an error -- avoid this 2889 * until there is a need for more elaborate variants of internal suspend. 
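 * (In practice the flags come from dm_internal_suspend_noflush() below,
 * which passes DM_SUSPEND_NOFLUSH_FLAG.)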
2890 */ 2891 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2892 DMF_SUSPENDED_INTERNALLY); 2893 2894 set_bit(DMF_POST_SUSPENDING, &md->flags); 2895 dm_table_postsuspend_targets(map); 2896 clear_bit(DMF_POST_SUSPENDING, &md->flags); 2897 } 2898 2899 static void __dm_internal_resume(struct mapped_device *md) 2900 { 2901 BUG_ON(!md->internal_suspend_count); 2902 2903 if (--md->internal_suspend_count) 2904 return; /* resume from nested internal suspend */ 2905 2906 if (dm_suspended_md(md)) 2907 goto done; /* resume from nested suspend */ 2908 2909 /* 2910 * NOTE: existing callers don't need to call dm_table_resume_targets 2911 * (which may fail -- so best to avoid it for now by passing NULL map) 2912 */ 2913 (void) __dm_resume(md, NULL); 2914 2915 done: 2916 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2917 smp_mb__after_atomic(); 2918 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2919 } 2920 2921 void dm_internal_suspend_noflush(struct mapped_device *md) 2922 { 2923 mutex_lock(&md->suspend_lock); 2924 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2925 mutex_unlock(&md->suspend_lock); 2926 } 2927 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2928 2929 void dm_internal_resume(struct mapped_device *md) 2930 { 2931 mutex_lock(&md->suspend_lock); 2932 __dm_internal_resume(md); 2933 mutex_unlock(&md->suspend_lock); 2934 } 2935 EXPORT_SYMBOL_GPL(dm_internal_resume); 2936 2937 /* 2938 * Fast variants of internal suspend/resume hold md->suspend_lock, 2939 * which prevents interaction with userspace-driven suspend. 2940 */ 2941 2942 void dm_internal_suspend_fast(struct mapped_device *md) 2943 { 2944 mutex_lock(&md->suspend_lock); 2945 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2946 return; 2947 2948 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2949 synchronize_srcu(&md->io_barrier); 2950 flush_workqueue(md->wq); 2951 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2952 } 2953 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2954 2955 void dm_internal_resume_fast(struct mapped_device *md) 2956 { 2957 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2958 goto done; 2959 2960 dm_queue_flush(md); 2961 2962 done: 2963 mutex_unlock(&md->suspend_lock); 2964 } 2965 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2966 2967 /*----------------------------------------------------------------- 2968 * Event notification. 
2969 *---------------------------------------------------------------*/ 2970 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2971 unsigned cookie) 2972 { 2973 int r; 2974 unsigned noio_flag; 2975 char udev_cookie[DM_COOKIE_LENGTH]; 2976 char *envp[] = { udev_cookie, NULL }; 2977 2978 noio_flag = memalloc_noio_save(); 2979 2980 if (!cookie) 2981 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2982 else { 2983 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 2984 DM_COOKIE_ENV_VAR_NAME, cookie); 2985 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2986 action, envp); 2987 } 2988 2989 memalloc_noio_restore(noio_flag); 2990 2991 return r; 2992 } 2993 2994 uint32_t dm_next_uevent_seq(struct mapped_device *md) 2995 { 2996 return atomic_add_return(1, &md->uevent_seq); 2997 } 2998 2999 uint32_t dm_get_event_nr(struct mapped_device *md) 3000 { 3001 return atomic_read(&md->event_nr); 3002 } 3003 3004 int dm_wait_event(struct mapped_device *md, int event_nr) 3005 { 3006 return wait_event_interruptible(md->eventq, 3007 (event_nr != atomic_read(&md->event_nr))); 3008 } 3009 3010 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 3011 { 3012 unsigned long flags; 3013 3014 spin_lock_irqsave(&md->uevent_lock, flags); 3015 list_add(elist, &md->uevent_list); 3016 spin_unlock_irqrestore(&md->uevent_lock, flags); 3017 } 3018 3019 /* 3020 * The gendisk is only valid as long as you have a reference 3021 * count on 'md'. 3022 */ 3023 struct gendisk *dm_disk(struct mapped_device *md) 3024 { 3025 return md->disk; 3026 } 3027 EXPORT_SYMBOL_GPL(dm_disk); 3028 3029 struct kobject *dm_kobject(struct mapped_device *md) 3030 { 3031 return &md->kobj_holder.kobj; 3032 } 3033 3034 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3035 { 3036 struct mapped_device *md; 3037 3038 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3039 3040 spin_lock(&_minor_lock); 3041 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 3042 md = NULL; 3043 goto out; 3044 } 3045 dm_get(md); 3046 out: 3047 spin_unlock(&_minor_lock); 3048 3049 return md; 3050 } 3051 3052 int dm_suspended_md(struct mapped_device *md) 3053 { 3054 return test_bit(DMF_SUSPENDED, &md->flags); 3055 } 3056 3057 static int dm_post_suspending_md(struct mapped_device *md) 3058 { 3059 return test_bit(DMF_POST_SUSPENDING, &md->flags); 3060 } 3061 3062 int dm_suspended_internally_md(struct mapped_device *md) 3063 { 3064 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3065 } 3066 3067 int dm_test_deferred_remove_flag(struct mapped_device *md) 3068 { 3069 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 3070 } 3071 3072 int dm_suspended(struct dm_target *ti) 3073 { 3074 return dm_suspended_md(ti->table->md); 3075 } 3076 EXPORT_SYMBOL_GPL(dm_suspended); 3077 3078 int dm_post_suspending(struct dm_target *ti) 3079 { 3080 return dm_post_suspending_md(ti->table->md); 3081 } 3082 EXPORT_SYMBOL_GPL(dm_post_suspending); 3083 3084 int dm_noflush_suspending(struct dm_target *ti) 3085 { 3086 return __noflush_suspending(ti->table->md); 3087 } 3088 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 3089 3090 void dm_free_md_mempools(struct dm_md_mempools *pools) 3091 { 3092 if (!pools) 3093 return; 3094 3095 bioset_exit(&pools->bs); 3096 bioset_exit(&pools->io_bs); 3097 3098 kfree(pools); 3099 } 3100 3101 struct dm_pr { 3102 u64 old_key; 3103 u64 new_key; 3104 u32 flags; 3105 bool abort; 3106 bool fail_early; 3107 int ret; 3108 enum pr_type type; 3109 }; 3110 3111 static int 
dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 3112 struct dm_pr *pr) 3113 { 3114 struct mapped_device *md = bdev->bd_disk->private_data; 3115 struct dm_table *table; 3116 struct dm_target *ti; 3117 int ret = -ENOTTY, srcu_idx; 3118 3119 table = dm_get_live_table(md, &srcu_idx); 3120 if (!table || !dm_table_get_size(table)) 3121 goto out; 3122 3123 /* We only support devices that have a single target */ 3124 if (table->num_targets != 1) 3125 goto out; 3126 ti = dm_table_get_target(table, 0); 3127 3128 if (dm_suspended_md(md)) { 3129 ret = -EAGAIN; 3130 goto out; 3131 } 3132 3133 ret = -EINVAL; 3134 if (!ti->type->iterate_devices) 3135 goto out; 3136 3137 ti->type->iterate_devices(ti, fn, pr); 3138 ret = 0; 3139 out: 3140 dm_put_live_table(md, srcu_idx); 3141 return ret; 3142 } 3143 3144 /* 3145 * For register / unregister we need to manually call out to every path. 3146 */ 3147 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 3148 sector_t start, sector_t len, void *data) 3149 { 3150 struct dm_pr *pr = data; 3151 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3152 int ret; 3153 3154 if (!ops || !ops->pr_register) { 3155 pr->ret = -EOPNOTSUPP; 3156 return -1; 3157 } 3158 3159 ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 3160 if (!ret) 3161 return 0; 3162 3163 if (!pr->ret) 3164 pr->ret = ret; 3165 3166 if (pr->fail_early) 3167 return -1; 3168 3169 return 0; 3170 } 3171 3172 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3173 u32 flags) 3174 { 3175 struct dm_pr pr = { 3176 .old_key = old_key, 3177 .new_key = new_key, 3178 .flags = flags, 3179 .fail_early = true, 3180 .ret = 0, 3181 }; 3182 int ret; 3183 3184 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 3185 if (ret) { 3186 /* Didn't even get to register a path */ 3187 return ret; 3188 } 3189 3190 if (!pr.ret) 3191 return 0; 3192 ret = pr.ret; 3193 3194 if (!new_key) 3195 return ret; 3196 3197 /* unregister all paths if we failed to register any path */ 3198 pr.old_key = new_key; 3199 pr.new_key = 0; 3200 pr.flags = 0; 3201 pr.fail_early = false; 3202 (void) dm_call_pr(bdev, __dm_pr_register, &pr); 3203 return ret; 3204 } 3205 3206 3207 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev, 3208 sector_t start, sector_t len, void *data) 3209 { 3210 struct dm_pr *pr = data; 3211 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3212 3213 if (!ops || !ops->pr_reserve) { 3214 pr->ret = -EOPNOTSUPP; 3215 return -1; 3216 } 3217 3218 pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); 3219 if (!pr->ret) 3220 return -1; 3221 3222 return 0; 3223 } 3224 3225 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3226 u32 flags) 3227 { 3228 struct dm_pr pr = { 3229 .old_key = key, 3230 .flags = flags, 3231 .type = type, 3232 .fail_early = false, 3233 .ret = 0, 3234 }; 3235 int ret; 3236 3237 ret = dm_call_pr(bdev, __dm_pr_reserve, &pr); 3238 if (ret) 3239 return ret; 3240 3241 return pr.ret; 3242 } 3243 3244 /* 3245 * If there is a non-All Registrants type of reservation, the release must be 3246 * sent down the holding path. For the cases where there is no reservation or 3247 * the path is not the holder the device will also return success, so we must 3248 * try each path to make sure we got the correct path. 
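 * (Hence __dm_pr_release() below returns 0 to keep iterating while a path
 * reports success, and returns -1 to stop at the first path that fails; that
 * failing status is what dm_pr_release() hands back.)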
3249 */ 3250 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev, 3251 sector_t start, sector_t len, void *data) 3252 { 3253 struct dm_pr *pr = data; 3254 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3255 3256 if (!ops || !ops->pr_release) { 3257 pr->ret = -EOPNOTSUPP; 3258 return -1; 3259 } 3260 3261 pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); 3262 if (pr->ret) 3263 return -1; 3264 3265 return 0; 3266 } 3267 3268 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3269 { 3270 struct dm_pr pr = { 3271 .old_key = key, 3272 .type = type, 3273 .fail_early = false, 3274 }; 3275 int ret; 3276 3277 ret = dm_call_pr(bdev, __dm_pr_release, &pr); 3278 if (ret) 3279 return ret; 3280 3281 return pr.ret; 3282 } 3283 3284 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev, 3285 sector_t start, sector_t len, void *data) 3286 { 3287 struct dm_pr *pr = data; 3288 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3289 3290 if (!ops || !ops->pr_preempt) { 3291 pr->ret = -EOPNOTSUPP; 3292 return -1; 3293 } 3294 3295 pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, 3296 pr->abort); 3297 if (!pr->ret) 3298 return -1; 3299 3300 return 0; 3301 } 3302 3303 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3304 enum pr_type type, bool abort) 3305 { 3306 struct dm_pr pr = { 3307 .new_key = new_key, 3308 .old_key = old_key, 3309 .type = type, 3310 .fail_early = false, 3311 }; 3312 int ret; 3313 3314 ret = dm_call_pr(bdev, __dm_pr_preempt, &pr); 3315 if (ret) 3316 return ret; 3317 3318 return pr.ret; 3319 } 3320 3321 static int dm_pr_clear(struct block_device *bdev, u64 key) 3322 { 3323 struct mapped_device *md = bdev->bd_disk->private_data; 3324 const struct pr_ops *ops; 3325 int r, srcu_idx; 3326 3327 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3328 if (r < 0) 3329 goto out; 3330 3331 ops = bdev->bd_disk->fops->pr_ops; 3332 if (ops && ops->pr_clear) 3333 r = ops->pr_clear(bdev, key); 3334 else 3335 r = -EOPNOTSUPP; 3336 out: 3337 dm_unprepare_ioctl(md, srcu_idx); 3338 return r; 3339 } 3340 3341 static const struct pr_ops dm_pr_ops = { 3342 .pr_register = dm_pr_register, 3343 .pr_reserve = dm_pr_reserve, 3344 .pr_release = dm_pr_release, 3345 .pr_preempt = dm_pr_preempt, 3346 .pr_clear = dm_pr_clear, 3347 }; 3348 3349 static const struct block_device_operations dm_blk_dops = { 3350 .submit_bio = dm_submit_bio, 3351 .poll_bio = dm_poll_bio, 3352 .open = dm_blk_open, 3353 .release = dm_blk_close, 3354 .ioctl = dm_blk_ioctl, 3355 .getgeo = dm_blk_getgeo, 3356 .report_zones = dm_blk_report_zones, 3357 .pr_ops = &dm_pr_ops, 3358 .owner = THIS_MODULE 3359 }; 3360 3361 static const struct block_device_operations dm_rq_blk_dops = { 3362 .open = dm_blk_open, 3363 .release = dm_blk_close, 3364 .ioctl = dm_blk_ioctl, 3365 .getgeo = dm_blk_getgeo, 3366 .pr_ops = &dm_pr_ops, 3367 .owner = THIS_MODULE 3368 }; 3369 3370 static const struct dax_operations dm_dax_ops = { 3371 .direct_access = dm_dax_direct_access, 3372 .zero_page_range = dm_dax_zero_page_range, 3373 .recovery_write = dm_dax_recovery_write, 3374 }; 3375 3376 /* 3377 * module hooks 3378 */ 3379 module_init(dm_init); 3380 module_exit(dm_exit); 3381 3382 module_param(major, uint, 0); 3383 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3384 3385 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3386 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3387 
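/*
 * dm_numa_node is read via dm_get_numa_node() whenever a new mapped_device
 * is allocated; see alloc_dev() above.
 */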
3388 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3389 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3390 3391 module_param(swap_bios, int, S_IRUGO | S_IWUSR); 3392 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); 3393 3394 MODULE_DESCRIPTION(DM_NAME " driver"); 3395 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3396 MODULE_LICENSE("GPL"); 3397
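
/*
 * Illustrative sketch, not part of the driver: the usual sequence for
 * replacing a live table from kernel code, stitched together from the
 * helpers defined above (dm_suspend(), dm_swap_table(), dm_resume()).
 * "example_replace_table" is a hypothetical wrapper shown only to
 * demonstrate the calling contracts documented above; real users drive this
 * through the dm-ioctl interface, and error paths are simplified (the device
 * is left suspended if the swap fails).
 */
static int __maybe_unused example_replace_table(struct mapped_device *md,
						struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r;

	/* dm_swap_table() requires the device to be suspended already */
	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
		if (r)
			return r;
	}

	/* __bind() hands back the old map; the caller must destroy it */
	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}

/*
 * Illustrative sketch of the lookup/reference rules, also hypothetical:
 * dm_get_md() takes a holder reference (or returns NULL once DMF_FREEING is
 * set), and every successful lookup must be balanced by dm_put().
 */
static void __maybe_unused example_log_device_name(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return;

	DMINFO("%s maps dev %u:%u", dm_device_name(md), MAJOR(dev), MINOR(dev));
	dm_put(md);
}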