1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm-core.h" 9 #include "dm-rq.h" 10 #include "dm-uevent.h" 11 #include "dm-ima.h" 12 13 #include <linux/init.h> 14 #include <linux/module.h> 15 #include <linux/mutex.h> 16 #include <linux/sched/mm.h> 17 #include <linux/sched/signal.h> 18 #include <linux/blkpg.h> 19 #include <linux/bio.h> 20 #include <linux/mempool.h> 21 #include <linux/dax.h> 22 #include <linux/slab.h> 23 #include <linux/idr.h> 24 #include <linux/uio.h> 25 #include <linux/hdreg.h> 26 #include <linux/delay.h> 27 #include <linux/wait.h> 28 #include <linux/pr.h> 29 #include <linux/refcount.h> 30 #include <linux/part_stat.h> 31 #include <linux/blk-crypto.h> 32 #include <linux/blk-crypto-profile.h> 33 34 #define DM_MSG_PREFIX "core" 35 36 /* 37 * Cookies are numeric values sent with CHANGE and REMOVE 38 * uevents while resuming, removing or renaming the device. 39 */ 40 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 41 #define DM_COOKIE_LENGTH 24 42 43 /* 44 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying 45 * dm_io into one list, and reuse bio->bi_private as the list head. Before 46 * ending this fs bio, we will recover its ->bi_private. 47 */ 48 #define REQ_DM_POLL_LIST REQ_DRV 49 50 static const char *_name = DM_NAME; 51 52 static unsigned int major = 0; 53 static unsigned int _major = 0; 54 55 static DEFINE_IDR(_minor_idr); 56 57 static DEFINE_SPINLOCK(_minor_lock); 58 59 static void do_deferred_remove(struct work_struct *w); 60 61 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 62 63 static struct workqueue_struct *deferred_remove_workqueue; 64 65 atomic_t dm_global_event_nr = ATOMIC_INIT(0); 66 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); 67 68 void dm_issue_global_event(void) 69 { 70 atomic_inc(&dm_global_event_nr); 71 wake_up(&dm_global_eventq); 72 } 73 74 /* 75 * One of these is allocated (on-stack) per original bio. 
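 * It carries the live table, the dm_io being built and the not-yet-mapped
 * remainder of the bio: ->sector and ->sector_count are advanced as clones
 * are handed out to targets by __split_and_process_bio().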
76 */ 77 struct clone_info { 78 struct dm_table *map; 79 struct bio *bio; 80 struct dm_io *io; 81 sector_t sector; 82 unsigned sector_count; 83 bool submit_as_polled; 84 }; 85 86 #define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone)) 87 #define DM_IO_BIO_OFFSET \ 88 (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio)) 89 90 static inline struct dm_target_io *clone_to_tio(struct bio *clone) 91 { 92 return container_of(clone, struct dm_target_io, clone); 93 } 94 95 void *dm_per_bio_data(struct bio *bio, size_t data_size) 96 { 97 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO)) 98 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; 99 return (char *)bio - DM_IO_BIO_OFFSET - data_size; 100 } 101 EXPORT_SYMBOL_GPL(dm_per_bio_data); 102 103 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 104 { 105 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 106 if (io->magic == DM_IO_MAGIC) 107 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); 108 BUG_ON(io->magic != DM_TIO_MAGIC); 109 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); 110 } 111 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 112 113 unsigned dm_bio_get_target_bio_nr(const struct bio *bio) 114 { 115 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 116 } 117 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 118 119 #define MINOR_ALLOCED ((void *)-1) 120 121 #define DM_NUMA_NODE NUMA_NO_NODE 122 static int dm_numa_node = DM_NUMA_NODE; 123 124 #define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) 125 static int swap_bios = DEFAULT_SWAP_BIOS; 126 static int get_swap_bios(void) 127 { 128 int latch = READ_ONCE(swap_bios); 129 if (unlikely(latch <= 0)) 130 latch = DEFAULT_SWAP_BIOS; 131 return latch; 132 } 133 134 /* 135 * For mempools pre-allocation at the table loading time. 136 */ 137 struct dm_md_mempools { 138 struct bio_set bs; 139 struct bio_set io_bs; 140 }; 141 142 struct table_device { 143 struct list_head list; 144 refcount_t count; 145 struct dm_dev dm_dev; 146 }; 147 148 /* 149 * Bio-based DM's mempools' reserved IOs set by the user. 
150 */ 151 #define RESERVED_BIO_BASED_IOS 16 152 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 153 154 static int __dm_get_module_param_int(int *module_param, int min, int max) 155 { 156 int param = READ_ONCE(*module_param); 157 int modified_param = 0; 158 bool modified = true; 159 160 if (param < min) 161 modified_param = min; 162 else if (param > max) 163 modified_param = max; 164 else 165 modified = false; 166 167 if (modified) { 168 (void)cmpxchg(module_param, param, modified_param); 169 param = modified_param; 170 } 171 172 return param; 173 } 174 175 unsigned __dm_get_module_param(unsigned *module_param, 176 unsigned def, unsigned max) 177 { 178 unsigned param = READ_ONCE(*module_param); 179 unsigned modified_param = 0; 180 181 if (!param) 182 modified_param = def; 183 else if (param > max) 184 modified_param = max; 185 186 if (modified_param) { 187 (void)cmpxchg(module_param, param, modified_param); 188 param = modified_param; 189 } 190 191 return param; 192 } 193 194 unsigned dm_get_reserved_bio_based_ios(void) 195 { 196 return __dm_get_module_param(&reserved_bio_based_ios, 197 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 198 } 199 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 200 201 static unsigned dm_get_numa_node(void) 202 { 203 return __dm_get_module_param_int(&dm_numa_node, 204 DM_NUMA_NODE, num_online_nodes() - 1); 205 } 206 207 static int __init local_init(void) 208 { 209 int r; 210 211 r = dm_uevent_init(); 212 if (r) 213 return r; 214 215 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 216 if (!deferred_remove_workqueue) { 217 r = -ENOMEM; 218 goto out_uevent_exit; 219 } 220 221 _major = major; 222 r = register_blkdev(_major, _name); 223 if (r < 0) 224 goto out_free_workqueue; 225 226 if (!_major) 227 _major = r; 228 229 return 0; 230 231 out_free_workqueue: 232 destroy_workqueue(deferred_remove_workqueue); 233 out_uevent_exit: 234 dm_uevent_exit(); 235 236 return r; 237 } 238 239 static void local_exit(void) 240 { 241 flush_scheduled_work(); 242 destroy_workqueue(deferred_remove_workqueue); 243 244 unregister_blkdev(_major, _name); 245 dm_uevent_exit(); 246 247 _major = 0; 248 249 DMINFO("cleaned up"); 250 } 251 252 static int (*_inits[])(void) __initdata = { 253 local_init, 254 dm_target_init, 255 dm_linear_init, 256 dm_stripe_init, 257 dm_io_init, 258 dm_kcopyd_init, 259 dm_interface_init, 260 dm_statistics_init, 261 }; 262 263 static void (*_exits[])(void) = { 264 local_exit, 265 dm_target_exit, 266 dm_linear_exit, 267 dm_stripe_exit, 268 dm_io_exit, 269 dm_kcopyd_exit, 270 dm_interface_exit, 271 dm_statistics_exit, 272 }; 273 274 static int __init dm_init(void) 275 { 276 const int count = ARRAY_SIZE(_inits); 277 int r, i; 278 279 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) 280 DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled." 281 " Duplicate IMA measurements will not be recorded in the IMA log."); 282 #endif 283 284 for (i = 0; i < count; i++) { 285 r = _inits[i](); 286 if (r) 287 goto bad; 288 } 289 290 return 0; 291 bad: 292 while (i--) 293 _exits[i](); 294 295 return r; 296 } 297 298 static void __exit dm_exit(void) 299 { 300 int i = ARRAY_SIZE(_exits); 301 302 while (i--) 303 _exits[i](); 304 305 /* 306 * Should be empty by this point. 
307 */ 308 idr_destroy(&_minor_idr); 309 } 310 311 /* 312 * Block device functions 313 */ 314 int dm_deleting_md(struct mapped_device *md) 315 { 316 return test_bit(DMF_DELETING, &md->flags); 317 } 318 319 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 320 { 321 struct mapped_device *md; 322 323 spin_lock(&_minor_lock); 324 325 md = bdev->bd_disk->private_data; 326 if (!md) 327 goto out; 328 329 if (test_bit(DMF_FREEING, &md->flags) || 330 dm_deleting_md(md)) { 331 md = NULL; 332 goto out; 333 } 334 335 dm_get(md); 336 atomic_inc(&md->open_count); 337 out: 338 spin_unlock(&_minor_lock); 339 340 return md ? 0 : -ENXIO; 341 } 342 343 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 344 { 345 struct mapped_device *md; 346 347 spin_lock(&_minor_lock); 348 349 md = disk->private_data; 350 if (WARN_ON(!md)) 351 goto out; 352 353 if (atomic_dec_and_test(&md->open_count) && 354 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 355 queue_work(deferred_remove_workqueue, &deferred_remove_work); 356 357 dm_put(md); 358 out: 359 spin_unlock(&_minor_lock); 360 } 361 362 int dm_open_count(struct mapped_device *md) 363 { 364 return atomic_read(&md->open_count); 365 } 366 367 /* 368 * Guarantees nothing is using the device before it's deleted. 369 */ 370 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 371 { 372 int r = 0; 373 374 spin_lock(&_minor_lock); 375 376 if (dm_open_count(md)) { 377 r = -EBUSY; 378 if (mark_deferred) 379 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 380 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 381 r = -EEXIST; 382 else 383 set_bit(DMF_DELETING, &md->flags); 384 385 spin_unlock(&_minor_lock); 386 387 return r; 388 } 389 390 int dm_cancel_deferred_remove(struct mapped_device *md) 391 { 392 int r = 0; 393 394 spin_lock(&_minor_lock); 395 396 if (test_bit(DMF_DELETING, &md->flags)) 397 r = -EBUSY; 398 else 399 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 400 401 spin_unlock(&_minor_lock); 402 403 return r; 404 } 405 406 static void do_deferred_remove(struct work_struct *w) 407 { 408 dm_deferred_remove(); 409 } 410 411 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 412 { 413 struct mapped_device *md = bdev->bd_disk->private_data; 414 415 return dm_get_geometry(md, geo); 416 } 417 418 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 419 struct block_device **bdev) 420 { 421 struct dm_target *tgt; 422 struct dm_table *map; 423 int r; 424 425 retry: 426 r = -ENOTTY; 427 map = dm_get_live_table(md, srcu_idx); 428 if (!map || !dm_table_get_size(map)) 429 return r; 430 431 /* We only support devices that have a single target */ 432 if (dm_table_get_num_targets(map) != 1) 433 return r; 434 435 tgt = dm_table_get_target(map, 0); 436 if (!tgt->type->prepare_ioctl) 437 return r; 438 439 if (dm_suspended_md(md)) 440 return -EAGAIN; 441 442 r = tgt->type->prepare_ioctl(tgt, bdev); 443 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 444 dm_put_live_table(md, *srcu_idx); 445 msleep(10); 446 goto retry; 447 } 448 449 return r; 450 } 451 452 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 453 { 454 dm_put_live_table(md, srcu_idx); 455 } 456 457 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 458 unsigned int cmd, unsigned long arg) 459 { 460 struct mapped_device *md = bdev->bd_disk->private_data; 461 int r, srcu_idx; 462 463 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 464 if (r < 0) 465 goto out; 466 467 if (r > 0) 
{ 468 /* 469 * Target determined this ioctl is being issued against a 470 * subset of the parent bdev; require extra privileges. 471 */ 472 if (!capable(CAP_SYS_RAWIO)) { 473 DMDEBUG_LIMIT( 474 "%s: sending ioctl %x to DM device without required privilege.", 475 current->comm, cmd); 476 r = -ENOIOCTLCMD; 477 goto out; 478 } 479 } 480 481 if (!bdev->bd_disk->fops->ioctl) 482 r = -ENOTTY; 483 else 484 r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); 485 out: 486 dm_unprepare_ioctl(md, srcu_idx); 487 return r; 488 } 489 490 u64 dm_start_time_ns_from_clone(struct bio *bio) 491 { 492 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); 493 } 494 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); 495 496 static bool bio_is_flush_with_data(struct bio *bio) 497 { 498 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); 499 } 500 501 static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio, 502 unsigned long start_time, struct dm_stats_aux *stats_aux) 503 { 504 bool is_flush_with_data; 505 unsigned int bi_size; 506 507 /* If REQ_PREFLUSH set save any payload but do not account it */ 508 is_flush_with_data = bio_is_flush_with_data(bio); 509 if (is_flush_with_data) { 510 bi_size = bio->bi_iter.bi_size; 511 bio->bi_iter.bi_size = 0; 512 } 513 514 if (!end) 515 bio_start_io_acct_time(bio, start_time); 516 else 517 bio_end_io_acct(bio, start_time); 518 519 if (unlikely(dm_stats_used(&md->stats))) 520 dm_stats_account_io(&md->stats, bio_data_dir(bio), 521 bio->bi_iter.bi_sector, bio_sectors(bio), 522 end, start_time, stats_aux); 523 524 /* Restore bio's payload so it does get accounted upon requeue */ 525 if (is_flush_with_data) 526 bio->bi_iter.bi_size = bi_size; 527 } 528 529 static void __dm_start_io_acct(struct dm_io *io, struct bio *bio) 530 { 531 dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux); 532 } 533 534 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) 535 { 536 /* Must account IO to DM device in terms of orig_bio */ 537 struct bio *bio = io->orig_bio; 538 539 /* 540 * Ensure IO accounting is only ever started once. 541 * Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. 
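	 * For the common case the DM_IO_ACCOUNTED flag is set without taking a
	 * lock; duplicate bios issued by __send_duplicate_bios() share a single
	 * dm_io, so that path serializes the flag update with io->lock below.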
542 */ 543 if (!clone || 544 likely(!dm_tio_flagged(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO))) { 545 if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED))) 546 return; 547 dm_io_set_flag(io, DM_IO_ACCOUNTED); 548 } else { 549 unsigned long flags; 550 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 551 return; 552 /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ 553 spin_lock_irqsave(&io->lock, flags); 554 dm_io_set_flag(io, DM_IO_ACCOUNTED); 555 spin_unlock_irqrestore(&io->lock, flags); 556 } 557 558 __dm_start_io_acct(io, bio); 559 } 560 561 static void dm_end_io_acct(struct dm_io *io, struct bio *bio) 562 { 563 dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux); 564 } 565 566 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) 567 { 568 struct dm_io *io; 569 struct dm_target_io *tio; 570 struct bio *clone; 571 572 clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs); 573 574 tio = clone_to_tio(clone); 575 tio->flags = 0; 576 dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); 577 tio->io = NULL; 578 579 io = container_of(tio, struct dm_io, tio); 580 io->magic = DM_IO_MAGIC; 581 io->status = 0; 582 atomic_set(&io->io_count, 1); 583 this_cpu_inc(*md->pending_io); 584 io->orig_bio = NULL; 585 io->md = md; 586 io->map_task = current; 587 spin_lock_init(&io->lock); 588 io->start_time = jiffies; 589 io->flags = 0; 590 591 dm_stats_record_start(&md->stats, &io->stats_aux); 592 593 return io; 594 } 595 596 static void free_io(struct dm_io *io) 597 { 598 bio_put(&io->tio.clone); 599 } 600 601 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, 602 unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask) 603 { 604 struct dm_target_io *tio; 605 struct bio *clone; 606 607 if (!ci->io->tio.io) { 608 /* the dm_target_io embedded in ci->io is available */ 609 tio = &ci->io->tio; 610 /* alloc_io() already initialized embedded clone */ 611 clone = &tio->clone; 612 } else { 613 clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio, 614 gfp_mask, &ci->io->md->bs); 615 if (!clone) 616 return NULL; 617 618 /* REQ_DM_POLL_LIST shouldn't be inherited */ 619 clone->bi_opf &= ~REQ_DM_POLL_LIST; 620 621 tio = clone_to_tio(clone); 622 tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */ 623 } 624 625 tio->magic = DM_TIO_MAGIC; 626 tio->io = ci->io; 627 tio->ti = ti; 628 tio->target_bio_nr = target_bio_nr; 629 tio->len_ptr = len; 630 tio->old_sector = 0; 631 632 if (len) { 633 clone->bi_iter.bi_size = to_bytes(*len); 634 if (bio_integrity(clone)) 635 bio_integrity_trim(clone); 636 } 637 638 return clone; 639 } 640 641 static void free_tio(struct bio *clone) 642 { 643 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) 644 return; 645 bio_put(clone); 646 } 647 648 /* 649 * Add the bio to the list of deferred io. 650 */ 651 static void queue_io(struct mapped_device *md, struct bio *bio) 652 { 653 unsigned long flags; 654 655 spin_lock_irqsave(&md->deferred_lock, flags); 656 bio_list_add(&md->deferred, bio); 657 spin_unlock_irqrestore(&md->deferred_lock, flags); 658 queue_work(md->wq, &md->work); 659 } 660 661 /* 662 * Everyone (including functions in this file), should use this 663 * function to access the md->map field, and make sure they call 664 * dm_put_live_table() when finished. 
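 *
 * A typical access pattern (sketch):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect the live table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * Note that dm_put_live_table() must be called even if no table is loaded,
 * because the SRCU read lock is taken unconditionally.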
665 */ 666 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 667 { 668 *srcu_idx = srcu_read_lock(&md->io_barrier); 669 670 return srcu_dereference(md->map, &md->io_barrier); 671 } 672 673 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 674 { 675 srcu_read_unlock(&md->io_barrier, srcu_idx); 676 } 677 678 void dm_sync_table(struct mapped_device *md) 679 { 680 synchronize_srcu(&md->io_barrier); 681 synchronize_rcu_expedited(); 682 } 683 684 /* 685 * A fast alternative to dm_get_live_table/dm_put_live_table. 686 * The caller must not block between these two functions. 687 */ 688 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 689 { 690 rcu_read_lock(); 691 return rcu_dereference(md->map); 692 } 693 694 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 695 { 696 rcu_read_unlock(); 697 } 698 699 static char *_dm_claim_ptr = "I belong to device-mapper"; 700 701 /* 702 * Open a table device so we can use it as a map destination. 703 */ 704 static int open_table_device(struct table_device *td, dev_t dev, 705 struct mapped_device *md) 706 { 707 struct block_device *bdev; 708 u64 part_off; 709 int r; 710 711 BUG_ON(td->dm_dev.bdev); 712 713 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); 714 if (IS_ERR(bdev)) 715 return PTR_ERR(bdev); 716 717 r = bd_link_disk_holder(bdev, dm_disk(md)); 718 if (r) { 719 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 720 return r; 721 } 722 723 td->dm_dev.bdev = bdev; 724 td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off); 725 return 0; 726 } 727 728 /* 729 * Close a table device that we've been using. 730 */ 731 static void close_table_device(struct table_device *td, struct mapped_device *md) 732 { 733 if (!td->dm_dev.bdev) 734 return; 735 736 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 737 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 738 put_dax(td->dm_dev.dax_dev); 739 td->dm_dev.bdev = NULL; 740 td->dm_dev.dax_dev = NULL; 741 } 742 743 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 744 fmode_t mode) 745 { 746 struct table_device *td; 747 748 list_for_each_entry(td, l, list) 749 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 750 return td; 751 752 return NULL; 753 } 754 755 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 756 struct dm_dev **result) 757 { 758 int r; 759 struct table_device *td; 760 761 mutex_lock(&md->table_devices_lock); 762 td = find_table_device(&md->table_devices, dev, mode); 763 if (!td) { 764 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 765 if (!td) { 766 mutex_unlock(&md->table_devices_lock); 767 return -ENOMEM; 768 } 769 770 td->dm_dev.mode = mode; 771 td->dm_dev.bdev = NULL; 772 773 if ((r = open_table_device(td, dev, md))) { 774 mutex_unlock(&md->table_devices_lock); 775 kfree(td); 776 return r; 777 } 778 779 format_dev_t(td->dm_dev.name, dev); 780 781 refcount_set(&td->count, 1); 782 list_add(&td->list, &md->table_devices); 783 } else { 784 refcount_inc(&td->count); 785 } 786 mutex_unlock(&md->table_devices_lock); 787 788 *result = &td->dm_dev; 789 return 0; 790 } 791 792 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 793 { 794 struct table_device *td = container_of(d, struct table_device, dm_dev); 795 796 mutex_lock(&md->table_devices_lock); 797 if (refcount_dec_and_test(&td->count)) { 798 
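		/*
		 * Last holder dropped: close the underlying block device and
		 * free the entry while table_devices_lock is still held.
		 */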
close_table_device(td, md); 799 list_del(&td->list); 800 kfree(td); 801 } 802 mutex_unlock(&md->table_devices_lock); 803 } 804 805 static void free_table_devices(struct list_head *devices) 806 { 807 struct list_head *tmp, *next; 808 809 list_for_each_safe(tmp, next, devices) { 810 struct table_device *td = list_entry(tmp, struct table_device, list); 811 812 DMWARN("dm_destroy: %s still exists with %d references", 813 td->dm_dev.name, refcount_read(&td->count)); 814 kfree(td); 815 } 816 } 817 818 /* 819 * Get the geometry associated with a dm device 820 */ 821 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 822 { 823 *geo = md->geometry; 824 825 return 0; 826 } 827 828 /* 829 * Set the geometry of a device. 830 */ 831 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 832 { 833 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 834 835 if (geo->start > sz) { 836 DMWARN("Start sector is beyond the geometry limits."); 837 return -EINVAL; 838 } 839 840 md->geometry = *geo; 841 842 return 0; 843 } 844 845 static int __noflush_suspending(struct mapped_device *md) 846 { 847 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 848 } 849 850 static void dm_io_complete(struct dm_io *io) 851 { 852 blk_status_t io_error; 853 struct mapped_device *md = io->md; 854 struct bio *bio = io->orig_bio; 855 856 if (io->status == BLK_STS_DM_REQUEUE) { 857 unsigned long flags; 858 /* 859 * Target requested pushing back the I/O. 860 */ 861 spin_lock_irqsave(&md->deferred_lock, flags); 862 if (__noflush_suspending(md) && 863 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) { 864 /* NOTE early return due to BLK_STS_DM_REQUEUE below */ 865 bio_list_add_head(&md->deferred, bio); 866 } else { 867 /* 868 * noflush suspend was interrupted or this is 869 * a write to a zoned target. 870 */ 871 io->status = BLK_STS_IOERR; 872 } 873 spin_unlock_irqrestore(&md->deferred_lock, flags); 874 } 875 876 io_error = io->status; 877 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 878 dm_end_io_acct(io, bio); 879 else if (!io_error) { 880 /* 881 * Must handle target that DM_MAPIO_SUBMITTED only to 882 * then bio_endio() rather than dm_submit_bio_remap() 883 */ 884 __dm_start_io_acct(io, bio); 885 dm_end_io_acct(io, bio); 886 } 887 free_io(io); 888 smp_wmb(); 889 this_cpu_dec(*md->pending_io); 890 891 /* nudge anyone waiting on suspend queue */ 892 if (unlikely(wq_has_sleeper(&md->wait))) 893 wake_up(&md->wait); 894 895 if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) { 896 if (bio->bi_opf & REQ_POLLED) { 897 /* 898 * Upper layer won't help us poll split bio (io->orig_bio 899 * may only reflect a subset of the pre-split original) 900 * so clear REQ_POLLED in case of requeue. 901 */ 902 bio->bi_opf &= ~REQ_POLLED; 903 if (io_error == BLK_STS_AGAIN) { 904 /* io_uring doesn't handle BLK_STS_AGAIN (yet) */ 905 queue_io(md, bio); 906 } 907 } 908 return; 909 } 910 911 if (bio_is_flush_with_data(bio)) { 912 /* 913 * Preflush done for flush with data, reissue 914 * without REQ_PREFLUSH. 
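		 * (The preflush was carried out via __send_empty_flush()'s
		 * per-target clones; the data payload is requeued below and
		 * will be processed as an ordinary write.)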
915 */ 916 bio->bi_opf &= ~REQ_PREFLUSH; 917 queue_io(md, bio); 918 } else { 919 /* done with normal IO or empty flush */ 920 if (io_error) 921 bio->bi_status = io_error; 922 bio_endio(bio); 923 } 924 } 925 926 static inline bool dm_tio_is_normal(struct dm_target_io *tio) 927 { 928 return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) && 929 !dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); 930 } 931 932 /* 933 * Decrements the number of outstanding ios that a bio has been 934 * cloned into, completing the original io if necc. 935 */ 936 void dm_io_dec_pending(struct dm_io *io, blk_status_t error) 937 { 938 /* Push-back supersedes any I/O errors */ 939 if (unlikely(error)) { 940 unsigned long flags; 941 spin_lock_irqsave(&io->lock, flags); 942 if (!(io->status == BLK_STS_DM_REQUEUE && 943 __noflush_suspending(io->md))) 944 io->status = error; 945 spin_unlock_irqrestore(&io->lock, flags); 946 } 947 948 if (atomic_dec_and_test(&io->io_count)) 949 dm_io_complete(io); 950 } 951 952 void disable_discard(struct mapped_device *md) 953 { 954 struct queue_limits *limits = dm_get_queue_limits(md); 955 956 /* device doesn't really support DISCARD, disable it */ 957 limits->max_discard_sectors = 0; 958 } 959 960 void disable_write_zeroes(struct mapped_device *md) 961 { 962 struct queue_limits *limits = dm_get_queue_limits(md); 963 964 /* device doesn't really support WRITE ZEROES, disable it */ 965 limits->max_write_zeroes_sectors = 0; 966 } 967 968 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) 969 { 970 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); 971 } 972 973 static void clone_endio(struct bio *bio) 974 { 975 blk_status_t error = bio->bi_status; 976 struct dm_target_io *tio = clone_to_tio(bio); 977 struct dm_io *io = tio->io; 978 struct mapped_device *md = tio->io->md; 979 dm_endio_fn endio = tio->ti->type->end_io; 980 struct request_queue *q = bio->bi_bdev->bd_disk->queue; 981 982 if (unlikely(error == BLK_STS_TARGET)) { 983 if (bio_op(bio) == REQ_OP_DISCARD && 984 !bdev_max_discard_sectors(bio->bi_bdev)) 985 disable_discard(md); 986 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 987 !q->limits.max_write_zeroes_sectors) 988 disable_write_zeroes(md); 989 } 990 991 if (blk_queue_is_zoned(q)) 992 dm_zone_endio(io, bio); 993 994 if (endio) { 995 int r = endio(tio->ti, bio, &error); 996 switch (r) { 997 case DM_ENDIO_REQUEUE: 998 /* 999 * Requeuing writes to a sequential zone of a zoned 1000 * target will break the sequential write pattern: 1001 * fail such IO. 1002 */ 1003 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) 1004 error = BLK_STS_IOERR; 1005 else 1006 error = BLK_STS_DM_REQUEUE; 1007 fallthrough; 1008 case DM_ENDIO_DONE: 1009 break; 1010 case DM_ENDIO_INCOMPLETE: 1011 /* The target will handle the io */ 1012 return; 1013 default: 1014 DMWARN("unimplemented target endio return value: %d", r); 1015 BUG(); 1016 } 1017 } 1018 1019 if (unlikely(swap_bios_limit(tio->ti, bio))) { 1020 struct mapped_device *md = io->md; 1021 up(&md->swap_bios_semaphore); 1022 } 1023 1024 free_tio(bio); 1025 dm_io_dec_pending(io, error); 1026 } 1027 1028 /* 1029 * Return maximum size of I/O possible at the supplied sector up to the current 1030 * target boundary. 
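 * For example, a 100-sector target asked about target-relative offset 70 can
 * accept at most 30 further sectors here; max_io_len() below may clamp this
 * again when ti->max_io_len is set.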
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
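
/*
 * Hypothetical usage sketch (not an in-tree target): a ->map method that only
 * wants to handle up to 8 sectors of any bio it is offered could do:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		if (bio_sectors(bio) > 8)
 *			dm_accept_partial_bio(bio, 8);
 *		bio_set_dev(bio, ec->dev->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * DM core then takes care of resubmitting the remaining sectors of the
 * original bio on its own.
 */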

static inline void __dm_submit_bio_remap(struct bio *clone,
					 dev_t dev, sector_t old_sector)
{
	trace_block_bio_remap(clone, dev, old_sector);
	submit_bio_noacct(clone);
}

/*
 * @clone: clone bio that DM core passed to target's .map function
 * @tgt_clone: clone of @clone bio that target needs submitted
 *
 * Targets should use this interface to submit bios they take
 * ownership of when returning DM_MAPIO_SUBMITTED.
 *
 * Target should also enable ti->accounts_remapped_io
 */
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_io *io = tio->io;

	WARN_ON_ONCE(!tio->ti->accounts_remapped_io);

	/* establish bio that will get submitted */
	if (!tgt_clone)
		tgt_clone = clone;

	/*
	 * Account io->origin_bio to DM dev on behalf of target
	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
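	 * Two cases follow: when still inside the target's ->map call, only
	 * set DM_IO_START_ACCT and let dm_split_and_process_bio() start the
	 * accounting after any splitting; otherwise wait for io->orig_bio to
	 * be published and start accounting right away.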
1224 */ 1225 if (io->map_task == current) { 1226 /* Still in target's map function */ 1227 dm_io_set_flag(io, DM_IO_START_ACCT); 1228 } else { 1229 /* 1230 * Called by another thread, managed by DM target, 1231 * wait for dm_split_and_process_bio() to store 1232 * io->orig_bio 1233 */ 1234 while (unlikely(!smp_load_acquire(&io->orig_bio))) 1235 msleep(1); 1236 dm_start_io_acct(io, clone); 1237 } 1238 1239 __dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk), 1240 tio->old_sector); 1241 } 1242 EXPORT_SYMBOL_GPL(dm_submit_bio_remap); 1243 1244 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) 1245 { 1246 mutex_lock(&md->swap_bios_lock); 1247 while (latch < md->swap_bios) { 1248 cond_resched(); 1249 down(&md->swap_bios_semaphore); 1250 md->swap_bios--; 1251 } 1252 while (latch > md->swap_bios) { 1253 cond_resched(); 1254 up(&md->swap_bios_semaphore); 1255 md->swap_bios++; 1256 } 1257 mutex_unlock(&md->swap_bios_lock); 1258 } 1259 1260 static void __map_bio(struct bio *clone) 1261 { 1262 struct dm_target_io *tio = clone_to_tio(clone); 1263 int r; 1264 struct dm_io *io = tio->io; 1265 struct dm_target *ti = tio->ti; 1266 1267 clone->bi_end_io = clone_endio; 1268 1269 /* 1270 * Map the clone. 1271 */ 1272 dm_io_inc_pending(io); 1273 tio->old_sector = clone->bi_iter.bi_sector; 1274 1275 if (unlikely(swap_bios_limit(ti, clone))) { 1276 struct mapped_device *md = io->md; 1277 int latch = get_swap_bios(); 1278 if (unlikely(latch != md->swap_bios)) 1279 __set_swap_bios_limit(md, latch); 1280 down(&md->swap_bios_semaphore); 1281 } 1282 1283 /* 1284 * Check if the IO needs a special mapping due to zone append emulation 1285 * on zoned target. In this case, dm_zone_map_bio() calls the target 1286 * map operation. 1287 */ 1288 if (dm_emulate_zone_append(io->md)) 1289 r = dm_zone_map_bio(tio); 1290 else 1291 r = ti->type->map(ti, clone); 1292 1293 switch (r) { 1294 case DM_MAPIO_SUBMITTED: 1295 /* target has assumed ownership of this io */ 1296 if (!ti->accounts_remapped_io) 1297 dm_io_set_flag(io, DM_IO_START_ACCT); 1298 break; 1299 case DM_MAPIO_REMAPPED: 1300 /* 1301 * the bio has been remapped so dispatch it, but defer 1302 * dm_start_io_acct() until after possible bio_split(). 1303 */ 1304 __dm_submit_bio_remap(clone, disk_devt(io->md->disk), 1305 tio->old_sector); 1306 dm_io_set_flag(io, DM_IO_START_ACCT); 1307 break; 1308 case DM_MAPIO_KILL: 1309 case DM_MAPIO_REQUEUE: 1310 if (unlikely(swap_bios_limit(ti, clone))) 1311 up(&io->md->swap_bios_semaphore); 1312 free_tio(clone); 1313 if (r == DM_MAPIO_KILL) 1314 dm_io_dec_pending(io, BLK_STS_IOERR); 1315 else 1316 dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); 1317 break; 1318 default: 1319 DMWARN("unimplemented target map return value: %d", r); 1320 BUG(); 1321 } 1322 } 1323 1324 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1325 struct dm_target *ti, unsigned num_bios) 1326 { 1327 struct bio *bio; 1328 int try; 1329 1330 for (try = 0; try < 2; try++) { 1331 int bio_nr; 1332 1333 if (try) 1334 mutex_lock(&ci->io->md->table_devices_lock); 1335 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1336 bio = alloc_tio(ci, ti, bio_nr, NULL, 1337 try ? 
GFP_NOIO : GFP_NOWAIT); 1338 if (!bio) 1339 break; 1340 1341 bio_list_add(blist, bio); 1342 } 1343 if (try) 1344 mutex_unlock(&ci->io->md->table_devices_lock); 1345 if (bio_nr == num_bios) 1346 return; 1347 1348 while ((bio = bio_list_pop(blist))) 1349 free_tio(bio); 1350 } 1351 } 1352 1353 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1354 unsigned num_bios, unsigned *len) 1355 { 1356 struct bio_list blist = BIO_EMPTY_LIST; 1357 struct bio *clone; 1358 1359 switch (num_bios) { 1360 case 0: 1361 break; 1362 case 1: 1363 clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); 1364 __map_bio(clone); 1365 break; 1366 default: 1367 /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ 1368 alloc_multiple_bios(&blist, ci, ti, num_bios); 1369 while ((clone = bio_list_pop(&blist))) { 1370 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 1371 __map_bio(clone); 1372 } 1373 break; 1374 } 1375 } 1376 1377 static void __send_empty_flush(struct clone_info *ci) 1378 { 1379 unsigned target_nr = 0; 1380 struct dm_target *ti; 1381 struct bio flush_bio; 1382 1383 /* 1384 * Use an on-stack bio for this, it's safe since we don't 1385 * need to reference it after submit. It's just used as 1386 * the basis for the clone(s). 1387 */ 1388 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, 1389 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 1390 1391 ci->bio = &flush_bio; 1392 ci->sector_count = 0; 1393 ci->io->tio.clone.bi_iter.bi_size = 0; 1394 1395 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1396 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1397 1398 bio_uninit(ci->bio); 1399 } 1400 1401 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1402 unsigned num_bios) 1403 { 1404 unsigned len; 1405 1406 len = min_t(sector_t, ci->sector_count, 1407 max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 1408 1409 __send_duplicate_bios(ci, ti, num_bios, &len); 1410 1411 ci->sector += len; 1412 ci->sector_count -= len; 1413 } 1414 1415 static bool is_abnormal_io(struct bio *bio) 1416 { 1417 bool r = false; 1418 1419 switch (bio_op(bio)) { 1420 case REQ_OP_DISCARD: 1421 case REQ_OP_SECURE_ERASE: 1422 case REQ_OP_WRITE_ZEROES: 1423 r = true; 1424 break; 1425 } 1426 1427 return r; 1428 } 1429 1430 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1431 int *result) 1432 { 1433 unsigned num_bios = 0; 1434 1435 switch (bio_op(ci->bio)) { 1436 case REQ_OP_DISCARD: 1437 num_bios = ti->num_discard_bios; 1438 break; 1439 case REQ_OP_SECURE_ERASE: 1440 num_bios = ti->num_secure_erase_bios; 1441 break; 1442 case REQ_OP_WRITE_ZEROES: 1443 num_bios = ti->num_write_zeroes_bios; 1444 break; 1445 default: 1446 return false; 1447 } 1448 1449 /* 1450 * Even though the device advertised support for this type of 1451 * request, that does not mean every target supports it, and 1452 * reconfiguration might also have changed that since the 1453 * check was performed. 1454 */ 1455 if (!num_bios) 1456 *result = -EOPNOTSUPP; 1457 else { 1458 __send_changing_extent_only(ci, ti, num_bios); 1459 *result = 0; 1460 } 1461 return true; 1462 } 1463 1464 /* 1465 * Reuse ->bi_private as hlist head for storing all dm_io instances 1466 * associated with this bio, and this bio's bi_private needs to be 1467 * stored in dm_io->data before the reuse. 1468 * 1469 * bio->bi_private is owned by fs or upper layer, so block layer won't 1470 * touch it after splitting. 
Meantime it won't be changed by anyone after
 * bio is submitted. So this reuse is safe.
 */
static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
{
	return (struct hlist_head *)&bio->bi_private;
}

static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{
	struct hlist_head *head = dm_get_bio_hlist_head(bio);

	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/*
		 * Save .bi_private into dm_io, so that we can reuse
		 * .bi_private as hlist head for storing dm_io list
		 */
		io->data = bio->bi_private;

		INIT_HLIST_HEAD(head);

		/* tell block layer to poll for completion */
		bio->bi_cookie = ~BLK_QC_T_NONE;
	} else {
		/*
		 * bio recursed due to split, reuse original poll list,
		 * and save bio->bi_private too.
		 */
		io->data = hlist_entry(head->first, struct dm_io, node)->data;
	}

	hlist_add_head(&io->node, head);
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_bio(struct clone_info *ci)
{
	struct bio *clone;
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	if (__process_abnormal_io(ci, ti, &r))
		return r;

	/*
	 * Only support bio polling for normal IO, and the target io is
	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
	 */
	ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;

	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
	__map_bio(clone);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->bio = bio;
	ci->submit_as_polled = false;
	ci->sector = bio->bi_iter.bi_sector;
	ci->sector_count = bio_sectors(bio);

	/* Shouldn't happen but sector_count was being set to 0 so... */
	if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
		ci->sector_count = 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void dm_split_and_process_bio(struct mapped_device *md,
				     struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	struct bio *orig_bio = NULL;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		__send_empty_flush(&ci);
		/* dm_io_complete submits any data associated with flush */
		goto out;
	}

	error = __split_and_process_bio(&ci);
	ci.io->map_task = NULL;
	if (error || !ci.sector_count)
		goto out;

	/*
	 * Remainder must be passed to submit_bio_noacct() so it gets handled
	 * *after* bios already submitted have been completely processed.
	 * We take a clone of the original to store in ci.io->orig_bio to be
	 * used by dm_end_io_acct() and for dm_io_complete() to use for
	 * completion handling.
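	 * The pointer is published with smp_store_release() so that
	 * dm_submit_bio_remap() can safely wait for it with smp_load_acquire().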
	 */
	orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count,
			     GFP_NOIO, &md->queue->bio_split);
	bio_chain(orig_bio, bio);
	trace_block_split(orig_bio, bio->bi_iter.bi_sector);
	submit_bio_noacct(bio);
out:
	if (!orig_bio)
		orig_bio = bio;
	smp_store_release(&ci.io->orig_bio, orig_bio);
	if (dm_io_flagged(ci.io, DM_IO_START_ACCT))
		dm_start_io_acct(ci.io, NULL);

	/*
	 * Drop the extra reference count for non-POLLED bio, and hold one
	 * reference for POLLED bio, which will be released in dm_poll_bio
	 *
	 * Add every dm_io instance into the hlist_head which is stored in
	 * bio->bi_private, so that dm_poll_bio can poll them all.
	 */
	if (error || !ci.submit_as_polled)
		dm_io_dec_pending(ci.io, errno_to_blk_status(error));
	else
		dm_queue_poll_io(bio, ci.io);
}

static void dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	/* If suspended, or map not yet available, queue this IO for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
	    unlikely(!map)) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);
		else
			queue_io(md, bio);
		goto out;
	}

	/*
	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
	 * otherwise associated queue_limits won't be imposed.
	 */
	if (is_abnormal_io(bio))
		blk_queue_split(&bio);

	dm_split_and_process_bio(md, map, bio);
out:
	dm_put_live_table(md, srcu_idx);
}

static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
			  unsigned int flags)
{
	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));

	/* don't poll if the mapped io is done */
	if (atomic_read(&io->io_count) > 1)
		bio_poll(&io->tio.clone, iob, flags);

	/* bio_poll holds the last reference */
	return atomic_read(&io->io_count) == 1;
}

static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
		       unsigned int flags)
{
	struct hlist_head *head = dm_get_bio_hlist_head(bio);
	struct hlist_head tmp = HLIST_HEAD_INIT;
	struct hlist_node *next;
	struct dm_io *io;

	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
		return 0;

	WARN_ON_ONCE(hlist_empty(head));

	hlist_move_list(head, &tmp);

	/*
	 * Restore .bi_private before possibly completing dm_io.
	 *
	 * bio_poll() is only possible once @bio has been completely
	 * submitted via submit_bio_noacct()'s depth-first submission.
	 * So there is no dm_queue_poll_io() race associated with
	 * clearing REQ_DM_POLL_LIST here.
	 */
	bio->bi_opf &= ~REQ_DM_POLL_LIST;
	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;

	hlist_for_each_entry_safe(io, next, &tmp, node) {
		if (dm_poll_dm_io(io, iob, flags)) {
			hlist_del_init(&io->node);
			/*
			 * clone_endio() has already occurred, so passing
			 * error as 0 here doesn't override io->status
			 */
			dm_io_dec_pending(io, 0);
		}
	}

	/* Not done?
*/ 1692 if (!hlist_empty(&tmp)) { 1693 bio->bi_opf |= REQ_DM_POLL_LIST; 1694 /* Reset bio->bi_private to dm_io list head */ 1695 hlist_move_list(&tmp, head); 1696 return 0; 1697 } 1698 return 1; 1699 } 1700 1701 /*----------------------------------------------------------------- 1702 * An IDR is used to keep track of allocated minor numbers. 1703 *---------------------------------------------------------------*/ 1704 static void free_minor(int minor) 1705 { 1706 spin_lock(&_minor_lock); 1707 idr_remove(&_minor_idr, minor); 1708 spin_unlock(&_minor_lock); 1709 } 1710 1711 /* 1712 * See if the device with a specific minor # is free. 1713 */ 1714 static int specific_minor(int minor) 1715 { 1716 int r; 1717 1718 if (minor >= (1 << MINORBITS)) 1719 return -EINVAL; 1720 1721 idr_preload(GFP_KERNEL); 1722 spin_lock(&_minor_lock); 1723 1724 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1725 1726 spin_unlock(&_minor_lock); 1727 idr_preload_end(); 1728 if (r < 0) 1729 return r == -ENOSPC ? -EBUSY : r; 1730 return 0; 1731 } 1732 1733 static int next_free_minor(int *minor) 1734 { 1735 int r; 1736 1737 idr_preload(GFP_KERNEL); 1738 spin_lock(&_minor_lock); 1739 1740 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1741 1742 spin_unlock(&_minor_lock); 1743 idr_preload_end(); 1744 if (r < 0) 1745 return r; 1746 *minor = r; 1747 return 0; 1748 } 1749 1750 static const struct block_device_operations dm_blk_dops; 1751 static const struct block_device_operations dm_rq_blk_dops; 1752 static const struct dax_operations dm_dax_ops; 1753 1754 static void dm_wq_work(struct work_struct *work); 1755 1756 #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1757 static void dm_queue_destroy_crypto_profile(struct request_queue *q) 1758 { 1759 dm_destroy_crypto_profile(q->crypto_profile); 1760 } 1761 1762 #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 1763 1764 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) 1765 { 1766 } 1767 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 1768 1769 static void cleanup_mapped_device(struct mapped_device *md) 1770 { 1771 if (md->wq) 1772 destroy_workqueue(md->wq); 1773 bioset_exit(&md->bs); 1774 bioset_exit(&md->io_bs); 1775 1776 if (md->dax_dev) { 1777 dax_remove_host(md->disk); 1778 kill_dax(md->dax_dev); 1779 put_dax(md->dax_dev); 1780 md->dax_dev = NULL; 1781 } 1782 1783 dm_cleanup_zoned_dev(md); 1784 if (md->disk) { 1785 spin_lock(&_minor_lock); 1786 md->disk->private_data = NULL; 1787 spin_unlock(&_minor_lock); 1788 if (dm_get_md_type(md) != DM_TYPE_NONE) { 1789 dm_sysfs_exit(md); 1790 del_gendisk(md->disk); 1791 } 1792 dm_queue_destroy_crypto_profile(md->queue); 1793 blk_cleanup_disk(md->disk); 1794 } 1795 1796 if (md->pending_io) { 1797 free_percpu(md->pending_io); 1798 md->pending_io = NULL; 1799 } 1800 1801 cleanup_srcu_struct(&md->io_barrier); 1802 1803 mutex_destroy(&md->suspend_lock); 1804 mutex_destroy(&md->type_lock); 1805 mutex_destroy(&md->table_devices_lock); 1806 mutex_destroy(&md->swap_bios_lock); 1807 1808 dm_mq_cleanup_mapped_device(md); 1809 } 1810 1811 /* 1812 * Allocate and initialise a blank device with a given minor. 
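 * On any failure the partially initialised state is unwound through the
 * bad_* labels at the bottom of the function and NULL is returned.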
1813 */ 1814 static struct mapped_device *alloc_dev(int minor) 1815 { 1816 int r, numa_node_id = dm_get_numa_node(); 1817 struct mapped_device *md; 1818 void *old_md; 1819 1820 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 1821 if (!md) { 1822 DMWARN("unable to allocate device, out of memory."); 1823 return NULL; 1824 } 1825 1826 if (!try_module_get(THIS_MODULE)) 1827 goto bad_module_get; 1828 1829 /* get a minor number for the dev */ 1830 if (minor == DM_ANY_MINOR) 1831 r = next_free_minor(&minor); 1832 else 1833 r = specific_minor(minor); 1834 if (r < 0) 1835 goto bad_minor; 1836 1837 r = init_srcu_struct(&md->io_barrier); 1838 if (r < 0) 1839 goto bad_io_barrier; 1840 1841 md->numa_node_id = numa_node_id; 1842 md->init_tio_pdu = false; 1843 md->type = DM_TYPE_NONE; 1844 mutex_init(&md->suspend_lock); 1845 mutex_init(&md->type_lock); 1846 mutex_init(&md->table_devices_lock); 1847 spin_lock_init(&md->deferred_lock); 1848 atomic_set(&md->holders, 1); 1849 atomic_set(&md->open_count, 0); 1850 atomic_set(&md->event_nr, 0); 1851 atomic_set(&md->uevent_seq, 0); 1852 INIT_LIST_HEAD(&md->uevent_list); 1853 INIT_LIST_HEAD(&md->table_devices); 1854 spin_lock_init(&md->uevent_lock); 1855 1856 /* 1857 * default to bio-based until DM table is loaded and md->type 1858 * established. If request-based table is loaded: blk-mq will 1859 * override accordingly. 1860 */ 1861 md->disk = blk_alloc_disk(md->numa_node_id); 1862 if (!md->disk) 1863 goto bad; 1864 md->queue = md->disk->queue; 1865 1866 init_waitqueue_head(&md->wait); 1867 INIT_WORK(&md->work, dm_wq_work); 1868 init_waitqueue_head(&md->eventq); 1869 init_completion(&md->kobj_holder.completion); 1870 1871 md->swap_bios = get_swap_bios(); 1872 sema_init(&md->swap_bios_semaphore, md->swap_bios); 1873 mutex_init(&md->swap_bios_lock); 1874 1875 md->disk->major = _major; 1876 md->disk->first_minor = minor; 1877 md->disk->minors = 1; 1878 md->disk->flags |= GENHD_FL_NO_PART; 1879 md->disk->fops = &dm_blk_dops; 1880 md->disk->queue = md->queue; 1881 md->disk->private_data = md; 1882 sprintf(md->disk->disk_name, "dm-%d", minor); 1883 1884 if (IS_ENABLED(CONFIG_FS_DAX)) { 1885 md->dax_dev = alloc_dax(md, &dm_dax_ops); 1886 if (IS_ERR(md->dax_dev)) { 1887 md->dax_dev = NULL; 1888 goto bad; 1889 } 1890 set_dax_nocache(md->dax_dev); 1891 set_dax_nomc(md->dax_dev); 1892 if (dax_add_host(md->dax_dev, md->disk)) 1893 goto bad; 1894 } 1895 1896 format_dev_t(md->name, MKDEV(_major, minor)); 1897 1898 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); 1899 if (!md->wq) 1900 goto bad; 1901 1902 md->pending_io = alloc_percpu(unsigned long); 1903 if (!md->pending_io) 1904 goto bad; 1905 1906 dm_stats_init(&md->stats); 1907 1908 /* Populate the mapping, nobody knows we exist yet */ 1909 spin_lock(&_minor_lock); 1910 old_md = idr_replace(&_minor_idr, md, minor); 1911 spin_unlock(&_minor_lock); 1912 1913 BUG_ON(old_md != MINOR_ALLOCED); 1914 1915 return md; 1916 1917 bad: 1918 cleanup_mapped_device(md); 1919 bad_io_barrier: 1920 free_minor(minor); 1921 bad_minor: 1922 module_put(THIS_MODULE); 1923 bad_module_get: 1924 kvfree(md); 1925 return NULL; 1926 } 1927 1928 static void unlock_fs(struct mapped_device *md); 1929 1930 static void free_dev(struct mapped_device *md) 1931 { 1932 int minor = MINOR(disk_devt(md->disk)); 1933 1934 unlock_fs(md); 1935 1936 cleanup_mapped_device(md); 1937 1938 free_table_devices(&md->table_devices); 1939 dm_stats_cleanup(&md->stats); 1940 free_minor(minor); 1941 1942 module_put(THIS_MODULE); 1943 kvfree(md); 
1944 } 1945 1946 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1947 { 1948 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1949 int ret = 0; 1950 1951 if (dm_table_bio_based(t)) { 1952 /* 1953 * The md may already have mempools that need changing. 1954 * If so, reload bioset because front_pad may have changed 1955 * because a different table was loaded. 1956 */ 1957 bioset_exit(&md->bs); 1958 bioset_exit(&md->io_bs); 1959 1960 } else if (bioset_initialized(&md->bs)) { 1961 /* 1962 * There's no need to reload with request-based dm 1963 * because the size of front_pad doesn't change. 1964 * Note for future: If you are to reload bioset, 1965 * prep-ed requests in the queue may refer 1966 * to bio from the old bioset, so you must walk 1967 * through the queue to unprep. 1968 */ 1969 goto out; 1970 } 1971 1972 BUG_ON(!p || 1973 bioset_initialized(&md->bs) || 1974 bioset_initialized(&md->io_bs)); 1975 1976 ret = bioset_init_from_src(&md->bs, &p->bs); 1977 if (ret) 1978 goto out; 1979 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 1980 if (ret) 1981 bioset_exit(&md->bs); 1982 out: 1983 /* mempool bind completed, no longer need any mempools in the table */ 1984 dm_table_free_md_mempools(t); 1985 return ret; 1986 } 1987 1988 /* 1989 * Bind a table to the device. 1990 */ 1991 static void event_callback(void *context) 1992 { 1993 unsigned long flags; 1994 LIST_HEAD(uevents); 1995 struct mapped_device *md = (struct mapped_device *) context; 1996 1997 spin_lock_irqsave(&md->uevent_lock, flags); 1998 list_splice_init(&md->uevent_list, &uevents); 1999 spin_unlock_irqrestore(&md->uevent_lock, flags); 2000 2001 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2002 2003 atomic_inc(&md->event_nr); 2004 wake_up(&md->eventq); 2005 dm_issue_global_event(); 2006 } 2007 2008 /* 2009 * Returns old map, which caller must destroy. 2010 */ 2011 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2012 struct queue_limits *limits) 2013 { 2014 struct dm_table *old_map; 2015 sector_t size; 2016 int ret; 2017 2018 lockdep_assert_held(&md->suspend_lock); 2019 2020 size = dm_table_get_size(t); 2021 2022 /* 2023 * Wipe any geometry if the size of the table changed. 2024 */ 2025 if (size != dm_get_size(md)) 2026 memset(&md->geometry, 0, sizeof(md->geometry)); 2027 2028 if (!get_capacity(md->disk)) 2029 set_capacity(md->disk, size); 2030 else 2031 set_capacity_and_notify(md->disk, size); 2032 2033 dm_table_event_callback(t, event_callback, md); 2034 2035 if (dm_table_request_based(t)) { 2036 /* 2037 * Leverage the fact that request-based DM targets are 2038 * immutable singletons - used to optimize dm_mq_queue_rq. 2039 */ 2040 md->immutable_target = dm_table_get_immutable_target(t); 2041 } 2042 2043 ret = __bind_mempools(md, t); 2044 if (ret) { 2045 old_map = ERR_PTR(ret); 2046 goto out; 2047 } 2048 2049 ret = dm_table_set_restrictions(t, md->queue, limits); 2050 if (ret) { 2051 old_map = ERR_PTR(ret); 2052 goto out; 2053 } 2054 2055 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2056 rcu_assign_pointer(md->map, (void *)t); 2057 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2058 2059 if (old_map) 2060 dm_sync_table(md); 2061 out: 2062 return old_map; 2063 } 2064 2065 /* 2066 * Returns unbound table for the caller to free. 
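 * The live map pointer is cleared and dm_sync_table() waits for all SRCU/RCU
 * readers of the old table to finish before it is handed back.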
2067 */ 2068 static struct dm_table *__unbind(struct mapped_device *md) 2069 { 2070 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2071 2072 if (!map) 2073 return NULL; 2074 2075 dm_table_event_callback(map, NULL, NULL); 2076 RCU_INIT_POINTER(md->map, NULL); 2077 dm_sync_table(md); 2078 2079 return map; 2080 } 2081 2082 /* 2083 * Constructor for a new device. 2084 */ 2085 int dm_create(int minor, struct mapped_device **result) 2086 { 2087 struct mapped_device *md; 2088 2089 md = alloc_dev(minor); 2090 if (!md) 2091 return -ENXIO; 2092 2093 dm_ima_reset_data(md); 2094 2095 *result = md; 2096 return 0; 2097 } 2098 2099 /* 2100 * Functions to manage md->type. 2101 * All are required to hold md->type_lock. 2102 */ 2103 void dm_lock_md_type(struct mapped_device *md) 2104 { 2105 mutex_lock(&md->type_lock); 2106 } 2107 2108 void dm_unlock_md_type(struct mapped_device *md) 2109 { 2110 mutex_unlock(&md->type_lock); 2111 } 2112 2113 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2114 { 2115 BUG_ON(!mutex_is_locked(&md->type_lock)); 2116 md->type = type; 2117 } 2118 2119 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2120 { 2121 return md->type; 2122 } 2123 2124 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2125 { 2126 return md->immutable_target_type; 2127 } 2128 2129 /* 2130 * The queue_limits are only valid as long as you have a reference 2131 * count on 'md'. 2132 */ 2133 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2134 { 2135 BUG_ON(!atomic_read(&md->holders)); 2136 return &md->queue->limits; 2137 } 2138 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2139 2140 /* 2141 * Setup the DM device's queue based on md's type 2142 */ 2143 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2144 { 2145 enum dm_queue_mode type = dm_table_get_type(t); 2146 struct queue_limits limits; 2147 int r; 2148 2149 switch (type) { 2150 case DM_TYPE_REQUEST_BASED: 2151 md->disk->fops = &dm_rq_blk_dops; 2152 r = dm_mq_init_request_queue(md, t); 2153 if (r) { 2154 DMERR("Cannot initialize queue for request-based dm mapped device"); 2155 return r; 2156 } 2157 break; 2158 case DM_TYPE_BIO_BASED: 2159 case DM_TYPE_DAX_BIO_BASED: 2160 break; 2161 case DM_TYPE_NONE: 2162 WARN_ON_ONCE(true); 2163 break; 2164 } 2165 2166 r = dm_calculate_queue_limits(t, &limits); 2167 if (r) { 2168 DMERR("Cannot calculate initial queue limits"); 2169 return r; 2170 } 2171 r = dm_table_set_restrictions(t, md->queue, &limits); 2172 if (r) 2173 return r; 2174 2175 r = add_disk(md->disk); 2176 if (r) 2177 return r; 2178 2179 r = dm_sysfs_init(md); 2180 if (r) { 2181 del_gendisk(md->disk); 2182 return r; 2183 } 2184 md->type = type; 2185 return 0; 2186 } 2187 2188 struct mapped_device *dm_get_md(dev_t dev) 2189 { 2190 struct mapped_device *md; 2191 unsigned minor = MINOR(dev); 2192 2193 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2194 return NULL; 2195 2196 spin_lock(&_minor_lock); 2197 2198 md = idr_find(&_minor_idr, minor); 2199 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2200 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2201 md = NULL; 2202 goto out; 2203 } 2204 dm_get(md); 2205 out: 2206 spin_unlock(&_minor_lock); 2207 2208 return md; 2209 } 2210 EXPORT_SYMBOL_GPL(dm_get_md); 2211 2212 void *dm_get_mdptr(struct mapped_device *md) 2213 { 2214 return md->interface_ptr; 2215 } 2216 2217 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2218 { 2219 md->interface_ptr = ptr; 

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
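
/*
 * Illustrative sketch (not compiled in): dm_hold() is the variant to use
 * when the caller cannot rule out a concurrent removal.  Unlike dm_get(),
 * it reports failure instead of hitting a BUG_ON() on a dying device:
 *
 *	if (dm_hold(md))
 *		return -EBUSY;
 *	...
 *	dm_put(md);
 */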

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_mark_disk_dead(md->disk);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		set_bit(DMF_POST_SUSPENDING, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests that still need to complete.
	 * Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static bool dm_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(md->pending_io, cpu);

	return sum != 0;
}

static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (true) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!dm_in_flight_bios(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	smp_rmb();

	return r;
}

static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
{
	int r = 0;

	if (!queue_is_mq(md->queue))
		return dm_wait_for_bios_completion(md, task_state);

	while (true) {
		if (!blk_mq_queue_inflight(md->queue))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		msleep(5);
	}

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device, work);
	struct bio *bio;

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		bio = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}
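
/*
 * Illustrative note (sketch, not compiled in) on the deferred-bio handshake
 * used around suspend: while DMF_BLOCK_IO_FOR_SUSPEND is set, dm_submit_bio()
 * parks new bios on md->deferred instead of mapping them; dm_queue_flush()
 * clears the bit and kicks dm_wq_work(), which re-submits whatever was parked:
 *
 *	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);	(suspend side)
 *	...
 *	dm_queue_flush(md);				(resume side)
 */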

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(test_bit(DMF_FROZEN, &md->flags));

	r = freeze_bdev(md->disk->part0);
	if (!r)
		set_bit(DMF_FROZEN, &md->flags);
	return r;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;
	thaw_bdev(md->disk->part0);
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, unsigned int task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		DMDEBUG("%s: suspending with flush", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * dm_split_and_process_bio from dm_submit_bio.
	 *
	 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
	 * we take the write lock. To prevent any process from reentering
	 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
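
/*
 * Illustrative sketch (not compiled in): the usual suspend/resume bracket
 * as a hypothetical ioctl-style caller would issue it, with @suspend_flags
 * being a combination of DM_SUSPEND_LOCKFS_FLAG and DM_SUSPEND_NOFLUSH_FLAG:
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (r)
 *		return r;
 *	...	swap in the new table, etc.	...
 *	r = dm_resume(md);
 */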

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */
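
/*
 * Illustrative sketch (not compiled in): a hypothetical in-kernel user
 * (e.g. a target managing a sub-device) brackets its critical section
 * with the noflush internal variant:
 *
 *	dm_internal_suspend_noflush(md);
 *	...	no new bios reach the targets here	...
 *	dm_internal_resume(md);
 */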

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
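
/*
 * Illustrative note (sketch, not compiled in): the _fast variants are
 * deliberately asymmetric.  dm_internal_suspend_fast() returns with
 * md->suspend_lock still held (even on its early-return path), and it is
 * dm_internal_resume_fast() that drops the lock, so the two must always
 * be paired:
 *
 *	dm_internal_suspend_fast(md);
 *	...	md is quiesced, suspend_lock is held	...
 *	dm_internal_resume_fast(md);
 */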

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	int r;
	unsigned noio_flag;
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	noio_flag = memalloc_noio_save();

	if (!cookie)
		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
				       action, envp);
	}

	memalloc_noio_restore(noio_flag);

	return r;
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

static int dm_post_suspending_md(struct mapped_device *md)
{
	return test_bit(DMF_POST_SUSPENDING, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_post_suspending(struct dm_target *ti)
{
	return dm_post_suspending_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_post_suspending);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
		io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
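
/*
 * Illustrative sketch (not compiled in): dm_call_pr() invokes @fn once per
 * underlying device of the single target, so a hypothetical callout has the
 * iterate_devices_callout_fn signature (most targets stop the walk on the
 * first non-zero return):
 *
 *	static int example_pr_callout(struct dm_target *ti, struct dm_dev *dev,
 *				      sector_t start, sector_t len, void *data)
 *	{
 *		return 0;
 *	}
 *
 *	ret = dm_call_pr(bdev, example_pr_callout, &pr_data);
 */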

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.submit_bio = dm_submit_bio,
	.poll_bio = dm_poll_bio,
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
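
/*
 * Illustrative note: dm_rq_blk_dops below intentionally omits ->submit_bio
 * and ->poll_bio.  Request-based mapped devices are driven through blk-mq
 * (set up in dm_mq_init_request_queue()), so their I/O enters via the
 * request path rather than dm_submit_bio().
 */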

static const struct block_device_operations dm_rq_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.zero_page_range = dm_dax_zero_page_range,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

module_param(swap_bios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");