1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm-core.h" 9 #include "dm-rq.h" 10 #include "dm-uevent.h" 11 #include "dm-ima.h" 12 13 #include <linux/init.h> 14 #include <linux/module.h> 15 #include <linux/mutex.h> 16 #include <linux/sched/mm.h> 17 #include <linux/sched/signal.h> 18 #include <linux/blkpg.h> 19 #include <linux/bio.h> 20 #include <linux/mempool.h> 21 #include <linux/dax.h> 22 #include <linux/slab.h> 23 #include <linux/idr.h> 24 #include <linux/uio.h> 25 #include <linux/hdreg.h> 26 #include <linux/delay.h> 27 #include <linux/wait.h> 28 #include <linux/pr.h> 29 #include <linux/refcount.h> 30 #include <linux/part_stat.h> 31 #include <linux/blk-crypto.h> 32 #include <linux/blk-crypto-profile.h> 33 34 #define DM_MSG_PREFIX "core" 35 36 /* 37 * Cookies are numeric values sent with CHANGE and REMOVE 38 * uevents while resuming, removing or renaming the device. 39 */ 40 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 41 #define DM_COOKIE_LENGTH 24 42 43 /* 44 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying 45 * dm_io into one list, and reuse bio->bi_private as the list head. Before 46 * ending this fs bio, we will recover its ->bi_private. 47 */ 48 #define REQ_DM_POLL_LIST REQ_DRV 49 50 static const char *_name = DM_NAME; 51 52 static unsigned int major = 0; 53 static unsigned int _major = 0; 54 55 static DEFINE_IDR(_minor_idr); 56 57 static DEFINE_SPINLOCK(_minor_lock); 58 59 static void do_deferred_remove(struct work_struct *w); 60 61 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 62 63 static struct workqueue_struct *deferred_remove_workqueue; 64 65 atomic_t dm_global_event_nr = ATOMIC_INIT(0); 66 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); 67 68 void dm_issue_global_event(void) 69 { 70 atomic_inc(&dm_global_event_nr); 71 wake_up(&dm_global_eventq); 72 } 73 74 /* 75 * One of these is allocated (on-stack) per original bio. 
76 */ 77 struct clone_info { 78 struct dm_table *map; 79 struct bio *bio; 80 struct dm_io *io; 81 sector_t sector; 82 unsigned sector_count; 83 bool submit_as_polled; 84 }; 85 86 #define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone)) 87 #define DM_IO_BIO_OFFSET \ 88 (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio)) 89 90 static inline struct dm_target_io *clone_to_tio(struct bio *clone) 91 { 92 return container_of(clone, struct dm_target_io, clone); 93 } 94 95 void *dm_per_bio_data(struct bio *bio, size_t data_size) 96 { 97 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO)) 98 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; 99 return (char *)bio - DM_IO_BIO_OFFSET - data_size; 100 } 101 EXPORT_SYMBOL_GPL(dm_per_bio_data); 102 103 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 104 { 105 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 106 if (io->magic == DM_IO_MAGIC) 107 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); 108 BUG_ON(io->magic != DM_TIO_MAGIC); 109 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); 110 } 111 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 112 113 unsigned dm_bio_get_target_bio_nr(const struct bio *bio) 114 { 115 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 116 } 117 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 118 119 #define MINOR_ALLOCED ((void *)-1) 120 121 #define DM_NUMA_NODE NUMA_NO_NODE 122 static int dm_numa_node = DM_NUMA_NODE; 123 124 #define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) 125 static int swap_bios = DEFAULT_SWAP_BIOS; 126 static int get_swap_bios(void) 127 { 128 int latch = READ_ONCE(swap_bios); 129 if (unlikely(latch <= 0)) 130 latch = DEFAULT_SWAP_BIOS; 131 return latch; 132 } 133 134 /* 135 * For mempools pre-allocation at the table loading time. 136 */ 137 struct dm_md_mempools { 138 struct bio_set bs; 139 struct bio_set io_bs; 140 }; 141 142 struct table_device { 143 struct list_head list; 144 refcount_t count; 145 struct dm_dev dm_dev; 146 }; 147 148 /* 149 * Bio-based DM's mempools' reserved IOs set by the user. 
150 */ 151 #define RESERVED_BIO_BASED_IOS 16 152 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 153 154 static int __dm_get_module_param_int(int *module_param, int min, int max) 155 { 156 int param = READ_ONCE(*module_param); 157 int modified_param = 0; 158 bool modified = true; 159 160 if (param < min) 161 modified_param = min; 162 else if (param > max) 163 modified_param = max; 164 else 165 modified = false; 166 167 if (modified) { 168 (void)cmpxchg(module_param, param, modified_param); 169 param = modified_param; 170 } 171 172 return param; 173 } 174 175 unsigned __dm_get_module_param(unsigned *module_param, 176 unsigned def, unsigned max) 177 { 178 unsigned param = READ_ONCE(*module_param); 179 unsigned modified_param = 0; 180 181 if (!param) 182 modified_param = def; 183 else if (param > max) 184 modified_param = max; 185 186 if (modified_param) { 187 (void)cmpxchg(module_param, param, modified_param); 188 param = modified_param; 189 } 190 191 return param; 192 } 193 194 unsigned dm_get_reserved_bio_based_ios(void) 195 { 196 return __dm_get_module_param(&reserved_bio_based_ios, 197 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 198 } 199 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 200 201 static unsigned dm_get_numa_node(void) 202 { 203 return __dm_get_module_param_int(&dm_numa_node, 204 DM_NUMA_NODE, num_online_nodes() - 1); 205 } 206 207 static int __init local_init(void) 208 { 209 int r; 210 211 r = dm_uevent_init(); 212 if (r) 213 return r; 214 215 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 216 if (!deferred_remove_workqueue) { 217 r = -ENOMEM; 218 goto out_uevent_exit; 219 } 220 221 _major = major; 222 r = register_blkdev(_major, _name); 223 if (r < 0) 224 goto out_free_workqueue; 225 226 if (!_major) 227 _major = r; 228 229 return 0; 230 231 out_free_workqueue: 232 destroy_workqueue(deferred_remove_workqueue); 233 out_uevent_exit: 234 dm_uevent_exit(); 235 236 return r; 237 } 238 239 static void local_exit(void) 240 { 241 flush_scheduled_work(); 242 destroy_workqueue(deferred_remove_workqueue); 243 244 unregister_blkdev(_major, _name); 245 dm_uevent_exit(); 246 247 _major = 0; 248 249 DMINFO("cleaned up"); 250 } 251 252 static int (*_inits[])(void) __initdata = { 253 local_init, 254 dm_target_init, 255 dm_linear_init, 256 dm_stripe_init, 257 dm_io_init, 258 dm_kcopyd_init, 259 dm_interface_init, 260 dm_statistics_init, 261 }; 262 263 static void (*_exits[])(void) = { 264 local_exit, 265 dm_target_exit, 266 dm_linear_exit, 267 dm_stripe_exit, 268 dm_io_exit, 269 dm_kcopyd_exit, 270 dm_interface_exit, 271 dm_statistics_exit, 272 }; 273 274 static int __init dm_init(void) 275 { 276 const int count = ARRAY_SIZE(_inits); 277 int r, i; 278 279 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) 280 DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled." 281 " Duplicate IMA measurements will not be recorded in the IMA log."); 282 #endif 283 284 for (i = 0; i < count; i++) { 285 r = _inits[i](); 286 if (r) 287 goto bad; 288 } 289 290 return 0; 291 bad: 292 while (i--) 293 _exits[i](); 294 295 return r; 296 } 297 298 static void __exit dm_exit(void) 299 { 300 int i = ARRAY_SIZE(_exits); 301 302 while (i--) 303 _exits[i](); 304 305 /* 306 * Should be empty by this point. 
307 */ 308 idr_destroy(&_minor_idr); 309 } 310 311 /* 312 * Block device functions 313 */ 314 int dm_deleting_md(struct mapped_device *md) 315 { 316 return test_bit(DMF_DELETING, &md->flags); 317 } 318 319 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 320 { 321 struct mapped_device *md; 322 323 spin_lock(&_minor_lock); 324 325 md = bdev->bd_disk->private_data; 326 if (!md) 327 goto out; 328 329 if (test_bit(DMF_FREEING, &md->flags) || 330 dm_deleting_md(md)) { 331 md = NULL; 332 goto out; 333 } 334 335 dm_get(md); 336 atomic_inc(&md->open_count); 337 out: 338 spin_unlock(&_minor_lock); 339 340 return md ? 0 : -ENXIO; 341 } 342 343 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 344 { 345 struct mapped_device *md; 346 347 spin_lock(&_minor_lock); 348 349 md = disk->private_data; 350 if (WARN_ON(!md)) 351 goto out; 352 353 if (atomic_dec_and_test(&md->open_count) && 354 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 355 queue_work(deferred_remove_workqueue, &deferred_remove_work); 356 357 dm_put(md); 358 out: 359 spin_unlock(&_minor_lock); 360 } 361 362 int dm_open_count(struct mapped_device *md) 363 { 364 return atomic_read(&md->open_count); 365 } 366 367 /* 368 * Guarantees nothing is using the device before it's deleted. 369 */ 370 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 371 { 372 int r = 0; 373 374 spin_lock(&_minor_lock); 375 376 if (dm_open_count(md)) { 377 r = -EBUSY; 378 if (mark_deferred) 379 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 380 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 381 r = -EEXIST; 382 else 383 set_bit(DMF_DELETING, &md->flags); 384 385 spin_unlock(&_minor_lock); 386 387 return r; 388 } 389 390 int dm_cancel_deferred_remove(struct mapped_device *md) 391 { 392 int r = 0; 393 394 spin_lock(&_minor_lock); 395 396 if (test_bit(DMF_DELETING, &md->flags)) 397 r = -EBUSY; 398 else 399 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 400 401 spin_unlock(&_minor_lock); 402 403 return r; 404 } 405 406 static void do_deferred_remove(struct work_struct *w) 407 { 408 dm_deferred_remove(); 409 } 410 411 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 412 { 413 struct mapped_device *md = bdev->bd_disk->private_data; 414 415 return dm_get_geometry(md, geo); 416 } 417 418 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 419 struct block_device **bdev) 420 { 421 struct dm_target *tgt; 422 struct dm_table *map; 423 int r; 424 425 retry: 426 r = -ENOTTY; 427 map = dm_get_live_table(md, srcu_idx); 428 if (!map || !dm_table_get_size(map)) 429 return r; 430 431 /* We only support devices that have a single target */ 432 if (dm_table_get_num_targets(map) != 1) 433 return r; 434 435 tgt = dm_table_get_target(map, 0); 436 if (!tgt->type->prepare_ioctl) 437 return r; 438 439 if (dm_suspended_md(md)) 440 return -EAGAIN; 441 442 r = tgt->type->prepare_ioctl(tgt, bdev); 443 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 444 dm_put_live_table(md, *srcu_idx); 445 msleep(10); 446 goto retry; 447 } 448 449 return r; 450 } 451 452 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 453 { 454 dm_put_live_table(md, srcu_idx); 455 } 456 457 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 458 unsigned int cmd, unsigned long arg) 459 { 460 struct mapped_device *md = bdev->bd_disk->private_data; 461 int r, srcu_idx; 462 463 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 464 if (r < 0) 465 goto out; 466 467 if (r > 0) 
{ 468 /* 469 * Target determined this ioctl is being issued against a 470 * subset of the parent bdev; require extra privileges. 471 */ 472 if (!capable(CAP_SYS_RAWIO)) { 473 DMDEBUG_LIMIT( 474 "%s: sending ioctl %x to DM device without required privilege.", 475 current->comm, cmd); 476 r = -ENOIOCTLCMD; 477 goto out; 478 } 479 } 480 481 if (!bdev->bd_disk->fops->ioctl) 482 r = -ENOTTY; 483 else 484 r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); 485 out: 486 dm_unprepare_ioctl(md, srcu_idx); 487 return r; 488 } 489 490 u64 dm_start_time_ns_from_clone(struct bio *bio) 491 { 492 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); 493 } 494 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); 495 496 static bool bio_is_flush_with_data(struct bio *bio) 497 { 498 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); 499 } 500 501 static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio, 502 unsigned long start_time, struct dm_stats_aux *stats_aux) 503 { 504 bool is_flush_with_data; 505 unsigned int bi_size; 506 507 /* If REQ_PREFLUSH set save any payload but do not account it */ 508 is_flush_with_data = bio_is_flush_with_data(bio); 509 if (is_flush_with_data) { 510 bi_size = bio->bi_iter.bi_size; 511 bio->bi_iter.bi_size = 0; 512 } 513 514 if (!end) 515 bio_start_io_acct_time(bio, start_time); 516 else 517 bio_end_io_acct(bio, start_time); 518 519 if (unlikely(dm_stats_used(&md->stats))) 520 dm_stats_account_io(&md->stats, bio_data_dir(bio), 521 bio->bi_iter.bi_sector, bio_sectors(bio), 522 end, start_time, stats_aux); 523 524 /* Restore bio's payload so it does get accounted upon requeue */ 525 if (is_flush_with_data) 526 bio->bi_iter.bi_size = bi_size; 527 } 528 529 static void __dm_start_io_acct(struct dm_io *io, struct bio *bio) 530 { 531 dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux); 532 } 533 534 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) 535 { 536 /* Must account IO to DM device in terms of orig_bio */ 537 struct bio *bio = io->orig_bio; 538 539 /* 540 * Ensure IO accounting is only ever started once. 541 * Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. 
542 */ 543 if (!clone || 544 likely(!dm_tio_flagged(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO))) { 545 if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED))) 546 return; 547 dm_io_set_flag(io, DM_IO_ACCOUNTED); 548 } else { 549 unsigned long flags; 550 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 551 return; 552 /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ 553 spin_lock_irqsave(&io->lock, flags); 554 dm_io_set_flag(io, DM_IO_ACCOUNTED); 555 spin_unlock_irqrestore(&io->lock, flags); 556 } 557 558 __dm_start_io_acct(io, bio); 559 } 560 561 static void dm_end_io_acct(struct dm_io *io, struct bio *bio) 562 { 563 dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux); 564 } 565 566 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) 567 { 568 struct dm_io *io; 569 struct dm_target_io *tio; 570 struct bio *clone; 571 572 clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs); 573 574 tio = clone_to_tio(clone); 575 tio->flags = 0; 576 dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); 577 tio->io = NULL; 578 579 io = container_of(tio, struct dm_io, tio); 580 io->magic = DM_IO_MAGIC; 581 io->status = 0; 582 atomic_set(&io->io_count, 1); 583 this_cpu_inc(*md->pending_io); 584 io->orig_bio = NULL; 585 io->md = md; 586 io->map_task = current; 587 spin_lock_init(&io->lock); 588 io->start_time = jiffies; 589 io->flags = 0; 590 591 dm_stats_record_start(&md->stats, &io->stats_aux); 592 593 return io; 594 } 595 596 static void free_io(struct dm_io *io) 597 { 598 bio_put(&io->tio.clone); 599 } 600 601 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, 602 unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask) 603 { 604 struct dm_target_io *tio; 605 struct bio *clone; 606 607 if (!ci->io->tio.io) { 608 /* the dm_target_io embedded in ci->io is available */ 609 tio = &ci->io->tio; 610 /* alloc_io() already initialized embedded clone */ 611 clone = &tio->clone; 612 } else { 613 clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio, 614 gfp_mask, &ci->io->md->bs); 615 if (!clone) 616 return NULL; 617 618 /* REQ_DM_POLL_LIST shouldn't be inherited */ 619 clone->bi_opf &= ~REQ_DM_POLL_LIST; 620 621 tio = clone_to_tio(clone); 622 tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */ 623 } 624 625 tio->magic = DM_TIO_MAGIC; 626 tio->io = ci->io; 627 tio->ti = ti; 628 tio->target_bio_nr = target_bio_nr; 629 tio->len_ptr = len; 630 tio->old_sector = 0; 631 632 if (len) { 633 clone->bi_iter.bi_size = to_bytes(*len); 634 if (bio_integrity(clone)) 635 bio_integrity_trim(clone); 636 } 637 638 return clone; 639 } 640 641 static void free_tio(struct bio *clone) 642 { 643 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) 644 return; 645 bio_put(clone); 646 } 647 648 /* 649 * Add the bio to the list of deferred io. 650 */ 651 static void queue_io(struct mapped_device *md, struct bio *bio) 652 { 653 unsigned long flags; 654 655 spin_lock_irqsave(&md->deferred_lock, flags); 656 bio_list_add(&md->deferred, bio); 657 spin_unlock_irqrestore(&md->deferred_lock, flags); 658 queue_work(md->wq, &md->work); 659 } 660 661 /* 662 * Everyone (including functions in this file), should use this 663 * function to access the md->map field, and make sure they call 664 * dm_put_live_table() when finished. 
665 */ 666 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 667 { 668 *srcu_idx = srcu_read_lock(&md->io_barrier); 669 670 return srcu_dereference(md->map, &md->io_barrier); 671 } 672 673 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 674 { 675 srcu_read_unlock(&md->io_barrier, srcu_idx); 676 } 677 678 void dm_sync_table(struct mapped_device *md) 679 { 680 synchronize_srcu(&md->io_barrier); 681 synchronize_rcu_expedited(); 682 } 683 684 /* 685 * A fast alternative to dm_get_live_table/dm_put_live_table. 686 * The caller must not block between these two functions. 687 */ 688 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 689 { 690 rcu_read_lock(); 691 return rcu_dereference(md->map); 692 } 693 694 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 695 { 696 rcu_read_unlock(); 697 } 698 699 static char *_dm_claim_ptr = "I belong to device-mapper"; 700 701 /* 702 * Open a table device so we can use it as a map destination. 703 */ 704 static int open_table_device(struct table_device *td, dev_t dev, 705 struct mapped_device *md) 706 { 707 struct block_device *bdev; 708 u64 part_off; 709 int r; 710 711 BUG_ON(td->dm_dev.bdev); 712 713 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); 714 if (IS_ERR(bdev)) 715 return PTR_ERR(bdev); 716 717 r = bd_link_disk_holder(bdev, dm_disk(md)); 718 if (r) { 719 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 720 return r; 721 } 722 723 td->dm_dev.bdev = bdev; 724 td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off); 725 return 0; 726 } 727 728 /* 729 * Close a table device that we've been using. 730 */ 731 static void close_table_device(struct table_device *td, struct mapped_device *md) 732 { 733 if (!td->dm_dev.bdev) 734 return; 735 736 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 737 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 738 put_dax(td->dm_dev.dax_dev); 739 td->dm_dev.bdev = NULL; 740 td->dm_dev.dax_dev = NULL; 741 } 742 743 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 744 fmode_t mode) 745 { 746 struct table_device *td; 747 748 list_for_each_entry(td, l, list) 749 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 750 return td; 751 752 return NULL; 753 } 754 755 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 756 struct dm_dev **result) 757 { 758 int r; 759 struct table_device *td; 760 761 mutex_lock(&md->table_devices_lock); 762 td = find_table_device(&md->table_devices, dev, mode); 763 if (!td) { 764 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 765 if (!td) { 766 mutex_unlock(&md->table_devices_lock); 767 return -ENOMEM; 768 } 769 770 td->dm_dev.mode = mode; 771 td->dm_dev.bdev = NULL; 772 773 if ((r = open_table_device(td, dev, md))) { 774 mutex_unlock(&md->table_devices_lock); 775 kfree(td); 776 return r; 777 } 778 779 format_dev_t(td->dm_dev.name, dev); 780 781 refcount_set(&td->count, 1); 782 list_add(&td->list, &md->table_devices); 783 } else { 784 refcount_inc(&td->count); 785 } 786 mutex_unlock(&md->table_devices_lock); 787 788 *result = &td->dm_dev; 789 return 0; 790 } 791 792 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 793 { 794 struct table_device *td = container_of(d, struct table_device, dm_dev); 795 796 mutex_lock(&md->table_devices_lock); 797 if (refcount_dec_and_test(&td->count)) { 798 
close_table_device(td, md); 799 list_del(&td->list); 800 kfree(td); 801 } 802 mutex_unlock(&md->table_devices_lock); 803 } 804 805 static void free_table_devices(struct list_head *devices) 806 { 807 struct list_head *tmp, *next; 808 809 list_for_each_safe(tmp, next, devices) { 810 struct table_device *td = list_entry(tmp, struct table_device, list); 811 812 DMWARN("dm_destroy: %s still exists with %d references", 813 td->dm_dev.name, refcount_read(&td->count)); 814 kfree(td); 815 } 816 } 817 818 /* 819 * Get the geometry associated with a dm device 820 */ 821 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 822 { 823 *geo = md->geometry; 824 825 return 0; 826 } 827 828 /* 829 * Set the geometry of a device. 830 */ 831 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 832 { 833 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 834 835 if (geo->start > sz) { 836 DMWARN("Start sector is beyond the geometry limits."); 837 return -EINVAL; 838 } 839 840 md->geometry = *geo; 841 842 return 0; 843 } 844 845 static int __noflush_suspending(struct mapped_device *md) 846 { 847 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 848 } 849 850 static void dm_io_complete(struct dm_io *io) 851 { 852 blk_status_t io_error; 853 struct mapped_device *md = io->md; 854 struct bio *bio = io->orig_bio; 855 856 if (io->status == BLK_STS_DM_REQUEUE) { 857 unsigned long flags; 858 /* 859 * Target requested pushing back the I/O. 860 */ 861 spin_lock_irqsave(&md->deferred_lock, flags); 862 if (__noflush_suspending(md) && 863 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) { 864 /* NOTE early return due to BLK_STS_DM_REQUEUE below */ 865 bio_list_add_head(&md->deferred, bio); 866 } else { 867 /* 868 * noflush suspend was interrupted or this is 869 * a write to a zoned target. 870 */ 871 io->status = BLK_STS_IOERR; 872 } 873 spin_unlock_irqrestore(&md->deferred_lock, flags); 874 } 875 876 io_error = io->status; 877 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 878 dm_end_io_acct(io, bio); 879 else if (!io_error) { 880 /* 881 * Must handle target that DM_MAPIO_SUBMITTED only to 882 * then bio_endio() rather than dm_submit_bio_remap() 883 */ 884 __dm_start_io_acct(io, bio); 885 dm_end_io_acct(io, bio); 886 } 887 free_io(io); 888 smp_wmb(); 889 this_cpu_dec(*md->pending_io); 890 891 /* nudge anyone waiting on suspend queue */ 892 if (unlikely(wq_has_sleeper(&md->wait))) 893 wake_up(&md->wait); 894 895 if (io_error == BLK_STS_DM_REQUEUE) { 896 /* 897 * Upper layer won't help us poll split bio, io->orig_bio 898 * may only reflect a subset of the pre-split original, 899 * so clear REQ_POLLED in case of requeue 900 */ 901 bio->bi_opf &= ~REQ_POLLED; 902 return; 903 } 904 905 if (bio_is_flush_with_data(bio)) { 906 /* 907 * Preflush done for flush with data, reissue 908 * without REQ_PREFLUSH. 909 */ 910 bio->bi_opf &= ~REQ_PREFLUSH; 911 queue_io(md, bio); 912 } else { 913 /* done with normal IO or empty flush */ 914 if (io_error) 915 bio->bi_status = io_error; 916 bio_endio(bio); 917 } 918 } 919 920 static inline bool dm_tio_is_normal(struct dm_target_io *tio) 921 { 922 return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) && 923 !dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); 924 } 925 926 /* 927 * Decrements the number of outstanding ios that a bio has been 928 * cloned into, completing the original io if necc. 
929 */ 930 void dm_io_dec_pending(struct dm_io *io, blk_status_t error) 931 { 932 /* Push-back supersedes any I/O errors */ 933 if (unlikely(error)) { 934 unsigned long flags; 935 spin_lock_irqsave(&io->lock, flags); 936 if (!(io->status == BLK_STS_DM_REQUEUE && 937 __noflush_suspending(io->md))) 938 io->status = error; 939 spin_unlock_irqrestore(&io->lock, flags); 940 } 941 942 if (atomic_dec_and_test(&io->io_count)) 943 dm_io_complete(io); 944 } 945 946 void disable_discard(struct mapped_device *md) 947 { 948 struct queue_limits *limits = dm_get_queue_limits(md); 949 950 /* device doesn't really support DISCARD, disable it */ 951 limits->max_discard_sectors = 0; 952 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); 953 } 954 955 void disable_write_zeroes(struct mapped_device *md) 956 { 957 struct queue_limits *limits = dm_get_queue_limits(md); 958 959 /* device doesn't really support WRITE ZEROES, disable it */ 960 limits->max_write_zeroes_sectors = 0; 961 } 962 963 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) 964 { 965 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); 966 } 967 968 static void clone_endio(struct bio *bio) 969 { 970 blk_status_t error = bio->bi_status; 971 struct dm_target_io *tio = clone_to_tio(bio); 972 struct dm_io *io = tio->io; 973 struct mapped_device *md = tio->io->md; 974 dm_endio_fn endio = tio->ti->type->end_io; 975 struct request_queue *q = bio->bi_bdev->bd_disk->queue; 976 977 if (unlikely(error == BLK_STS_TARGET)) { 978 if (bio_op(bio) == REQ_OP_DISCARD && 979 !q->limits.max_discard_sectors) 980 disable_discard(md); 981 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 982 !q->limits.max_write_zeroes_sectors) 983 disable_write_zeroes(md); 984 } 985 986 if (blk_queue_is_zoned(q)) 987 dm_zone_endio(io, bio); 988 989 if (endio) { 990 int r = endio(tio->ti, bio, &error); 991 switch (r) { 992 case DM_ENDIO_REQUEUE: 993 /* 994 * Requeuing writes to a sequential zone of a zoned 995 * target will break the sequential write pattern: 996 * fail such IO. 997 */ 998 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) 999 error = BLK_STS_IOERR; 1000 else 1001 error = BLK_STS_DM_REQUEUE; 1002 fallthrough; 1003 case DM_ENDIO_DONE: 1004 break; 1005 case DM_ENDIO_INCOMPLETE: 1006 /* The target will handle the io */ 1007 return; 1008 default: 1009 DMWARN("unimplemented target endio return value: %d", r); 1010 BUG(); 1011 } 1012 } 1013 1014 if (unlikely(swap_bios_limit(tio->ti, bio))) { 1015 struct mapped_device *md = io->md; 1016 up(&md->swap_bios_semaphore); 1017 } 1018 1019 free_tio(bio); 1020 dm_io_dec_pending(io, error); 1021 } 1022 1023 /* 1024 * Return maximum size of I/O possible at the supplied sector up to the current 1025 * target boundary. 1026 */ 1027 static inline sector_t max_io_len_target_boundary(struct dm_target *ti, 1028 sector_t target_offset) 1029 { 1030 return ti->len - target_offset; 1031 } 1032 1033 static sector_t max_io_len(struct dm_target *ti, sector_t sector) 1034 { 1035 sector_t target_offset = dm_target_offset(ti, sector); 1036 sector_t len = max_io_len_target_boundary(ti, target_offset); 1037 sector_t max_len; 1038 1039 /* 1040 * Does the target need to split IO even further? 1041 * - varied (per target) IO splitting is a tenet of DM; this 1042 * explains why stacked chunk_sectors based splitting via 1043 * blk_max_size_offset() isn't possible here. So pass in 1044 * ti->max_io_len to override stacked chunk_sectors. 
1045 */ 1046 if (ti->max_io_len) { 1047 max_len = blk_max_size_offset(ti->table->md->queue, 1048 target_offset, ti->max_io_len); 1049 if (len > max_len) 1050 len = max_len; 1051 } 1052 1053 return len; 1054 } 1055 1056 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1057 { 1058 if (len > UINT_MAX) { 1059 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1060 (unsigned long long)len, UINT_MAX); 1061 ti->error = "Maximum size of target IO is too large"; 1062 return -EINVAL; 1063 } 1064 1065 ti->max_io_len = (uint32_t) len; 1066 1067 return 0; 1068 } 1069 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1070 1071 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 1072 sector_t sector, int *srcu_idx) 1073 __acquires(md->io_barrier) 1074 { 1075 struct dm_table *map; 1076 struct dm_target *ti; 1077 1078 map = dm_get_live_table(md, srcu_idx); 1079 if (!map) 1080 return NULL; 1081 1082 ti = dm_table_find_target(map, sector); 1083 if (!ti) 1084 return NULL; 1085 1086 return ti; 1087 } 1088 1089 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 1090 long nr_pages, void **kaddr, pfn_t *pfn) 1091 { 1092 struct mapped_device *md = dax_get_private(dax_dev); 1093 sector_t sector = pgoff * PAGE_SECTORS; 1094 struct dm_target *ti; 1095 long len, ret = -EIO; 1096 int srcu_idx; 1097 1098 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1099 1100 if (!ti) 1101 goto out; 1102 if (!ti->type->direct_access) 1103 goto out; 1104 len = max_io_len(ti, sector) / PAGE_SECTORS; 1105 if (len < 1) 1106 goto out; 1107 nr_pages = min(len, nr_pages); 1108 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); 1109 1110 out: 1111 dm_put_live_table(md, srcu_idx); 1112 1113 return ret; 1114 } 1115 1116 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, 1117 size_t nr_pages) 1118 { 1119 struct mapped_device *md = dax_get_private(dax_dev); 1120 sector_t sector = pgoff * PAGE_SECTORS; 1121 struct dm_target *ti; 1122 int ret = -EIO; 1123 int srcu_idx; 1124 1125 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1126 1127 if (!ti) 1128 goto out; 1129 if (WARN_ON(!ti->type->dax_zero_page_range)) { 1130 /* 1131 * ->zero_page_range() is mandatory dax operation. If we are 1132 * here, something is wrong. 1133 */ 1134 goto out; 1135 } 1136 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); 1137 out: 1138 dm_put_live_table(md, srcu_idx); 1139 1140 return ret; 1141 } 1142 1143 /* 1144 * A target may call dm_accept_partial_bio only from the map routine. It is 1145 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management 1146 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by 1147 * __send_duplicate_bios(). 1148 * 1149 * dm_accept_partial_bio informs the dm that the target only wants to process 1150 * additional n_sectors sectors of the bio and the rest of the data should be 1151 * sent in a next bio. 1152 * 1153 * A diagram that explains the arithmetics: 1154 * +--------------------+---------------+-------+ 1155 * | 1 | 2 | 3 | 1156 * +--------------------+---------------+-------+ 1157 * 1158 * <-------------- *tio->len_ptr ---------------> 1159 * <------- bi_size -------> 1160 * <-- n_sectors --> 1161 * 1162 * Region 1 was already iterated over with bio_advance or similar function. 1163 * (it may be empty if the target doesn't use bio_advance) 1164 * Region 2 is the remaining bio size that the target wants to process. 
1165 * (it may be empty if region 1 is non-empty, although there is no reason 1166 * to make it empty) 1167 * The target requires that region 3 is to be sent in the next bio. 1168 * 1169 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1170 * the partially processed part (the sum of regions 1+2) must be the same for all 1171 * copies of the bio. 1172 */ 1173 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1174 { 1175 struct dm_target_io *tio = clone_to_tio(bio); 1176 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1177 1178 BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); 1179 BUG_ON(op_is_zone_mgmt(bio_op(bio))); 1180 BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND); 1181 BUG_ON(bi_size > *tio->len_ptr); 1182 BUG_ON(n_sectors > bi_size); 1183 1184 *tio->len_ptr -= bi_size - n_sectors; 1185 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1186 } 1187 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1188 1189 static inline void __dm_submit_bio_remap(struct bio *clone, 1190 dev_t dev, sector_t old_sector) 1191 { 1192 trace_block_bio_remap(clone, dev, old_sector); 1193 submit_bio_noacct(clone); 1194 } 1195 1196 /* 1197 * @clone: clone bio that DM core passed to target's .map function 1198 * @tgt_clone: clone of @clone bio that target needs submitted 1199 * 1200 * Targets should use this interface to submit bios they take 1201 * ownership of when returning DM_MAPIO_SUBMITTED. 1202 * 1203 * Target should also enable ti->accounts_remapped_io 1204 */ 1205 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) 1206 { 1207 struct dm_target_io *tio = clone_to_tio(clone); 1208 struct dm_io *io = tio->io; 1209 1210 WARN_ON_ONCE(!tio->ti->accounts_remapped_io); 1211 1212 /* establish bio that will get submitted */ 1213 if (!tgt_clone) 1214 tgt_clone = clone; 1215 1216 /* 1217 * Account io->origin_bio to DM dev on behalf of target 1218 * that took ownership of IO with DM_MAPIO_SUBMITTED. 1219 */ 1220 if (io->map_task == current) { 1221 /* Still in target's map function */ 1222 dm_io_set_flag(io, DM_IO_START_ACCT); 1223 } else { 1224 /* 1225 * Called by another thread, managed by DM target, 1226 * wait for dm_split_and_process_bio() to store 1227 * io->orig_bio 1228 */ 1229 while (unlikely(!smp_load_acquire(&io->orig_bio))) 1230 msleep(1); 1231 dm_start_io_acct(io, clone); 1232 } 1233 1234 __dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk), 1235 tio->old_sector); 1236 } 1237 EXPORT_SYMBOL_GPL(dm_submit_bio_remap); 1238 1239 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) 1240 { 1241 mutex_lock(&md->swap_bios_lock); 1242 while (latch < md->swap_bios) { 1243 cond_resched(); 1244 down(&md->swap_bios_semaphore); 1245 md->swap_bios--; 1246 } 1247 while (latch > md->swap_bios) { 1248 cond_resched(); 1249 up(&md->swap_bios_semaphore); 1250 md->swap_bios++; 1251 } 1252 mutex_unlock(&md->swap_bios_lock); 1253 } 1254 1255 static void __map_bio(struct bio *clone) 1256 { 1257 struct dm_target_io *tio = clone_to_tio(clone); 1258 int r; 1259 struct dm_io *io = tio->io; 1260 struct dm_target *ti = tio->ti; 1261 1262 clone->bi_end_io = clone_endio; 1263 1264 /* 1265 * Map the clone. 
1266 */ 1267 dm_io_inc_pending(io); 1268 tio->old_sector = clone->bi_iter.bi_sector; 1269 1270 if (unlikely(swap_bios_limit(ti, clone))) { 1271 struct mapped_device *md = io->md; 1272 int latch = get_swap_bios(); 1273 if (unlikely(latch != md->swap_bios)) 1274 __set_swap_bios_limit(md, latch); 1275 down(&md->swap_bios_semaphore); 1276 } 1277 1278 /* 1279 * Check if the IO needs a special mapping due to zone append emulation 1280 * on zoned target. In this case, dm_zone_map_bio() calls the target 1281 * map operation. 1282 */ 1283 if (dm_emulate_zone_append(io->md)) 1284 r = dm_zone_map_bio(tio); 1285 else 1286 r = ti->type->map(ti, clone); 1287 1288 switch (r) { 1289 case DM_MAPIO_SUBMITTED: 1290 /* target has assumed ownership of this io */ 1291 if (!ti->accounts_remapped_io) 1292 dm_io_set_flag(io, DM_IO_START_ACCT); 1293 break; 1294 case DM_MAPIO_REMAPPED: 1295 /* 1296 * the bio has been remapped so dispatch it, but defer 1297 * dm_start_io_acct() until after possible bio_split(). 1298 */ 1299 __dm_submit_bio_remap(clone, disk_devt(io->md->disk), 1300 tio->old_sector); 1301 dm_io_set_flag(io, DM_IO_START_ACCT); 1302 break; 1303 case DM_MAPIO_KILL: 1304 case DM_MAPIO_REQUEUE: 1305 if (unlikely(swap_bios_limit(ti, clone))) 1306 up(&io->md->swap_bios_semaphore); 1307 free_tio(clone); 1308 if (r == DM_MAPIO_KILL) 1309 dm_io_dec_pending(io, BLK_STS_IOERR); 1310 else 1311 dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); 1312 break; 1313 default: 1314 DMWARN("unimplemented target map return value: %d", r); 1315 BUG(); 1316 } 1317 } 1318 1319 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1320 struct dm_target *ti, unsigned num_bios, 1321 unsigned *len) 1322 { 1323 struct bio *bio; 1324 int try; 1325 1326 for (try = 0; try < 2; try++) { 1327 int bio_nr; 1328 1329 if (try) 1330 mutex_lock(&ci->io->md->table_devices_lock); 1331 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1332 bio = alloc_tio(ci, ti, bio_nr, len, 1333 try ? GFP_NOIO : GFP_NOWAIT); 1334 if (!bio) 1335 break; 1336 1337 bio_list_add(blist, bio); 1338 } 1339 if (try) 1340 mutex_unlock(&ci->io->md->table_devices_lock); 1341 if (bio_nr == num_bios) 1342 return; 1343 1344 while ((bio = bio_list_pop(blist))) 1345 free_tio(bio); 1346 } 1347 } 1348 1349 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1350 unsigned num_bios, unsigned *len) 1351 { 1352 struct bio_list blist = BIO_EMPTY_LIST; 1353 struct bio *clone; 1354 1355 switch (num_bios) { 1356 case 0: 1357 break; 1358 case 1: 1359 clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); 1360 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 1361 __map_bio(clone); 1362 break; 1363 default: 1364 alloc_multiple_bios(&blist, ci, ti, num_bios, len); 1365 while ((clone = bio_list_pop(&blist))) { 1366 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 1367 __map_bio(clone); 1368 } 1369 break; 1370 } 1371 } 1372 1373 static void __send_empty_flush(struct clone_info *ci) 1374 { 1375 unsigned target_nr = 0; 1376 struct dm_target *ti; 1377 struct bio flush_bio; 1378 1379 /* 1380 * Use an on-stack bio for this, it's safe since we don't 1381 * need to reference it after submit. It's just used as 1382 * the basis for the clone(s). 
1383 */ 1384 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, 1385 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 1386 1387 ci->bio = &flush_bio; 1388 ci->sector_count = 0; 1389 1390 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1391 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1392 1393 bio_uninit(ci->bio); 1394 } 1395 1396 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1397 unsigned num_bios) 1398 { 1399 unsigned len; 1400 1401 len = min_t(sector_t, ci->sector_count, 1402 max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 1403 1404 /* 1405 * dm_accept_partial_bio cannot be used with duplicate bios, 1406 * so update clone_info cursor before __send_duplicate_bios(). 1407 */ 1408 ci->sector += len; 1409 ci->sector_count -= len; 1410 1411 __send_duplicate_bios(ci, ti, num_bios, &len); 1412 } 1413 1414 static bool is_abnormal_io(struct bio *bio) 1415 { 1416 bool r = false; 1417 1418 switch (bio_op(bio)) { 1419 case REQ_OP_DISCARD: 1420 case REQ_OP_SECURE_ERASE: 1421 case REQ_OP_WRITE_ZEROES: 1422 r = true; 1423 break; 1424 } 1425 1426 return r; 1427 } 1428 1429 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1430 int *result) 1431 { 1432 unsigned num_bios = 0; 1433 1434 switch (bio_op(ci->bio)) { 1435 case REQ_OP_DISCARD: 1436 num_bios = ti->num_discard_bios; 1437 break; 1438 case REQ_OP_SECURE_ERASE: 1439 num_bios = ti->num_secure_erase_bios; 1440 break; 1441 case REQ_OP_WRITE_ZEROES: 1442 num_bios = ti->num_write_zeroes_bios; 1443 break; 1444 default: 1445 return false; 1446 } 1447 1448 /* 1449 * Even though the device advertised support for this type of 1450 * request, that does not mean every target supports it, and 1451 * reconfiguration might also have changed that since the 1452 * check was performed. 1453 */ 1454 if (!num_bios) 1455 *result = -EOPNOTSUPP; 1456 else { 1457 __send_changing_extent_only(ci, ti, num_bios); 1458 *result = 0; 1459 } 1460 return true; 1461 } 1462 1463 /* 1464 * Reuse ->bi_private as hlist head for storing all dm_io instances 1465 * associated with this bio, and this bio's bi_private needs to be 1466 * stored in dm_io->data before the reuse. 1467 * 1468 * bio->bi_private is owned by fs or upper layer, so block layer won't 1469 * touch it after splitting. Meantime it won't be changed by anyone after 1470 * bio is submitted. So this reuse is safe. 1471 */ 1472 static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio) 1473 { 1474 return (struct hlist_head *)&bio->bi_private; 1475 } 1476 1477 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) 1478 { 1479 struct hlist_head *head = dm_get_bio_hlist_head(bio); 1480 1481 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { 1482 bio->bi_opf |= REQ_DM_POLL_LIST; 1483 /* 1484 * Save .bi_private into dm_io, so that we can reuse 1485 * .bi_private as hlist head for storing dm_io list 1486 */ 1487 io->data = bio->bi_private; 1488 1489 INIT_HLIST_HEAD(head); 1490 1491 /* tell block layer to poll for completion */ 1492 bio->bi_cookie = ~BLK_QC_T_NONE; 1493 } else { 1494 /* 1495 * bio recursed due to split, reuse original poll list, 1496 * and save bio->bi_private too. 1497 */ 1498 io->data = hlist_entry(head->first, struct dm_io, node)->data; 1499 } 1500 1501 hlist_add_head(&io->node, head); 1502 } 1503 1504 /* 1505 * Select the correct strategy for processing a non-flush bio. 
1506 */ 1507 static int __split_and_process_bio(struct clone_info *ci) 1508 { 1509 struct bio *clone; 1510 struct dm_target *ti; 1511 unsigned len; 1512 int r; 1513 1514 ti = dm_table_find_target(ci->map, ci->sector); 1515 if (!ti) 1516 return -EIO; 1517 1518 if (__process_abnormal_io(ci, ti, &r)) 1519 return r; 1520 1521 /* 1522 * Only support bio polling for normal IO, and the target io is 1523 * exactly inside the dm_io instance (verified in dm_poll_dm_io) 1524 */ 1525 ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED; 1526 1527 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 1528 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); 1529 __map_bio(clone); 1530 1531 ci->sector += len; 1532 ci->sector_count -= len; 1533 1534 return 0; 1535 } 1536 1537 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1538 struct dm_table *map, struct bio *bio) 1539 { 1540 ci->map = map; 1541 ci->io = alloc_io(md, bio); 1542 ci->bio = bio; 1543 ci->submit_as_polled = false; 1544 ci->sector = bio->bi_iter.bi_sector; 1545 ci->sector_count = bio_sectors(bio); 1546 1547 /* Shouldn't happen but sector_count was being set to 0 so... */ 1548 if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) 1549 ci->sector_count = 0; 1550 } 1551 1552 /* 1553 * Entry point to split a bio into clones and submit them to the targets. 1554 */ 1555 static void dm_split_and_process_bio(struct mapped_device *md, 1556 struct dm_table *map, struct bio *bio) 1557 { 1558 struct clone_info ci; 1559 struct bio *orig_bio = NULL; 1560 int error = 0; 1561 1562 init_clone_info(&ci, md, map, bio); 1563 1564 if (bio->bi_opf & REQ_PREFLUSH) { 1565 __send_empty_flush(&ci); 1566 /* dm_io_complete submits any data associated with flush */ 1567 goto out; 1568 } 1569 1570 error = __split_and_process_bio(&ci); 1571 ci.io->map_task = NULL; 1572 if (error || !ci.sector_count) 1573 goto out; 1574 1575 /* 1576 * Remainder must be passed to submit_bio_noacct() so it gets handled 1577 * *after* bios already submitted have been completely processed. 1578 * We take a clone of the original to store in ci.io->orig_bio to be 1579 * used by dm_end_io_acct() and for dm_io_complete() to use for 1580 * completion handling. 1581 */ 1582 orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1583 GFP_NOIO, &md->queue->bio_split); 1584 bio_chain(orig_bio, bio); 1585 trace_block_split(orig_bio, bio->bi_iter.bi_sector); 1586 submit_bio_noacct(bio); 1587 out: 1588 if (!orig_bio) 1589 orig_bio = bio; 1590 smp_store_release(&ci.io->orig_bio, orig_bio); 1591 if (dm_io_flagged(ci.io, DM_IO_START_ACCT)) 1592 dm_start_io_acct(ci.io, NULL); 1593 1594 /* 1595 * Drop the extra reference count for non-POLLED bio, and hold one 1596 * reference for POLLED bio, which will be released in dm_poll_bio 1597 * 1598 * Add every dm_io instance into the hlist_head which is stored in 1599 * bio->bi_private, so that dm_poll_bio can poll them all. 
1600 */ 1601 if (error || !ci.submit_as_polled) 1602 dm_io_dec_pending(ci.io, errno_to_blk_status(error)); 1603 else 1604 dm_queue_poll_io(bio, ci.io); 1605 } 1606 1607 static void dm_submit_bio(struct bio *bio) 1608 { 1609 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 1610 int srcu_idx; 1611 struct dm_table *map; 1612 1613 map = dm_get_live_table(md, &srcu_idx); 1614 1615 /* If suspended, or map not yet available, queue this IO for later */ 1616 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || 1617 unlikely(!map)) { 1618 if (bio->bi_opf & REQ_NOWAIT) 1619 bio_wouldblock_error(bio); 1620 else if (bio->bi_opf & REQ_RAHEAD) 1621 bio_io_error(bio); 1622 else 1623 queue_io(md, bio); 1624 goto out; 1625 } 1626 1627 /* 1628 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) 1629 * otherwise associated queue_limits won't be imposed. 1630 */ 1631 if (is_abnormal_io(bio)) 1632 blk_queue_split(&bio); 1633 1634 dm_split_and_process_bio(md, map, bio); 1635 out: 1636 dm_put_live_table(md, srcu_idx); 1637 } 1638 1639 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, 1640 unsigned int flags) 1641 { 1642 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); 1643 1644 /* don't poll if the mapped io is done */ 1645 if (atomic_read(&io->io_count) > 1) 1646 bio_poll(&io->tio.clone, iob, flags); 1647 1648 /* bio_poll holds the last reference */ 1649 return atomic_read(&io->io_count) == 1; 1650 } 1651 1652 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, 1653 unsigned int flags) 1654 { 1655 struct hlist_head *head = dm_get_bio_hlist_head(bio); 1656 struct hlist_head tmp = HLIST_HEAD_INIT; 1657 struct hlist_node *next; 1658 struct dm_io *io; 1659 1660 /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */ 1661 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) 1662 return 0; 1663 1664 WARN_ON_ONCE(hlist_empty(head)); 1665 1666 hlist_move_list(head, &tmp); 1667 1668 /* 1669 * Restore .bi_private before possibly completing dm_io. 1670 * 1671 * bio_poll() is only possible once @bio has been completely 1672 * submitted via submit_bio_noacct()'s depth-first submission. 1673 * So there is no dm_queue_poll_io() race associated with 1674 * clearing REQ_DM_POLL_LIST here. 1675 */ 1676 bio->bi_opf &= ~REQ_DM_POLL_LIST; 1677 bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data; 1678 1679 hlist_for_each_entry_safe(io, next, &tmp, node) { 1680 if (dm_poll_dm_io(io, iob, flags)) { 1681 hlist_del_init(&io->node); 1682 /* 1683 * clone_endio() has already occurred, so passing 1684 * error as 0 here doesn't override io->status 1685 */ 1686 dm_io_dec_pending(io, 0); 1687 } 1688 } 1689 1690 /* Not done? */ 1691 if (!hlist_empty(&tmp)) { 1692 bio->bi_opf |= REQ_DM_POLL_LIST; 1693 /* Reset bio->bi_private to dm_io list head */ 1694 hlist_move_list(&tmp, head); 1695 return 0; 1696 } 1697 return 1; 1698 } 1699 1700 /*----------------------------------------------------------------- 1701 * An IDR is used to keep track of allocated minor numbers. 1702 *---------------------------------------------------------------*/ 1703 static void free_minor(int minor) 1704 { 1705 spin_lock(&_minor_lock); 1706 idr_remove(&_minor_idr, minor); 1707 spin_unlock(&_minor_lock); 1708 } 1709 1710 /* 1711 * See if the device with a specific minor # is free. 
1712 */ 1713 static int specific_minor(int minor) 1714 { 1715 int r; 1716 1717 if (minor >= (1 << MINORBITS)) 1718 return -EINVAL; 1719 1720 idr_preload(GFP_KERNEL); 1721 spin_lock(&_minor_lock); 1722 1723 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1724 1725 spin_unlock(&_minor_lock); 1726 idr_preload_end(); 1727 if (r < 0) 1728 return r == -ENOSPC ? -EBUSY : r; 1729 return 0; 1730 } 1731 1732 static int next_free_minor(int *minor) 1733 { 1734 int r; 1735 1736 idr_preload(GFP_KERNEL); 1737 spin_lock(&_minor_lock); 1738 1739 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1740 1741 spin_unlock(&_minor_lock); 1742 idr_preload_end(); 1743 if (r < 0) 1744 return r; 1745 *minor = r; 1746 return 0; 1747 } 1748 1749 static const struct block_device_operations dm_blk_dops; 1750 static const struct block_device_operations dm_rq_blk_dops; 1751 static const struct dax_operations dm_dax_ops; 1752 1753 static void dm_wq_work(struct work_struct *work); 1754 1755 #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1756 static void dm_queue_destroy_crypto_profile(struct request_queue *q) 1757 { 1758 dm_destroy_crypto_profile(q->crypto_profile); 1759 } 1760 1761 #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 1762 1763 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) 1764 { 1765 } 1766 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 1767 1768 static void cleanup_mapped_device(struct mapped_device *md) 1769 { 1770 if (md->wq) 1771 destroy_workqueue(md->wq); 1772 bioset_exit(&md->bs); 1773 bioset_exit(&md->io_bs); 1774 1775 if (md->dax_dev) { 1776 dax_remove_host(md->disk); 1777 kill_dax(md->dax_dev); 1778 put_dax(md->dax_dev); 1779 md->dax_dev = NULL; 1780 } 1781 1782 dm_cleanup_zoned_dev(md); 1783 if (md->disk) { 1784 spin_lock(&_minor_lock); 1785 md->disk->private_data = NULL; 1786 spin_unlock(&_minor_lock); 1787 if (dm_get_md_type(md) != DM_TYPE_NONE) { 1788 dm_sysfs_exit(md); 1789 del_gendisk(md->disk); 1790 } 1791 dm_queue_destroy_crypto_profile(md->queue); 1792 blk_cleanup_disk(md->disk); 1793 } 1794 1795 if (md->pending_io) { 1796 free_percpu(md->pending_io); 1797 md->pending_io = NULL; 1798 } 1799 1800 cleanup_srcu_struct(&md->io_barrier); 1801 1802 mutex_destroy(&md->suspend_lock); 1803 mutex_destroy(&md->type_lock); 1804 mutex_destroy(&md->table_devices_lock); 1805 mutex_destroy(&md->swap_bios_lock); 1806 1807 dm_mq_cleanup_mapped_device(md); 1808 } 1809 1810 /* 1811 * Allocate and initialise a blank device with a given minor. 
1812 */ 1813 static struct mapped_device *alloc_dev(int minor) 1814 { 1815 int r, numa_node_id = dm_get_numa_node(); 1816 struct mapped_device *md; 1817 void *old_md; 1818 1819 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 1820 if (!md) { 1821 DMWARN("unable to allocate device, out of memory."); 1822 return NULL; 1823 } 1824 1825 if (!try_module_get(THIS_MODULE)) 1826 goto bad_module_get; 1827 1828 /* get a minor number for the dev */ 1829 if (minor == DM_ANY_MINOR) 1830 r = next_free_minor(&minor); 1831 else 1832 r = specific_minor(minor); 1833 if (r < 0) 1834 goto bad_minor; 1835 1836 r = init_srcu_struct(&md->io_barrier); 1837 if (r < 0) 1838 goto bad_io_barrier; 1839 1840 md->numa_node_id = numa_node_id; 1841 md->init_tio_pdu = false; 1842 md->type = DM_TYPE_NONE; 1843 mutex_init(&md->suspend_lock); 1844 mutex_init(&md->type_lock); 1845 mutex_init(&md->table_devices_lock); 1846 spin_lock_init(&md->deferred_lock); 1847 atomic_set(&md->holders, 1); 1848 atomic_set(&md->open_count, 0); 1849 atomic_set(&md->event_nr, 0); 1850 atomic_set(&md->uevent_seq, 0); 1851 INIT_LIST_HEAD(&md->uevent_list); 1852 INIT_LIST_HEAD(&md->table_devices); 1853 spin_lock_init(&md->uevent_lock); 1854 1855 /* 1856 * default to bio-based until DM table is loaded and md->type 1857 * established. If request-based table is loaded: blk-mq will 1858 * override accordingly. 1859 */ 1860 md->disk = blk_alloc_disk(md->numa_node_id); 1861 if (!md->disk) 1862 goto bad; 1863 md->queue = md->disk->queue; 1864 1865 init_waitqueue_head(&md->wait); 1866 INIT_WORK(&md->work, dm_wq_work); 1867 init_waitqueue_head(&md->eventq); 1868 init_completion(&md->kobj_holder.completion); 1869 1870 md->swap_bios = get_swap_bios(); 1871 sema_init(&md->swap_bios_semaphore, md->swap_bios); 1872 mutex_init(&md->swap_bios_lock); 1873 1874 md->disk->major = _major; 1875 md->disk->first_minor = minor; 1876 md->disk->minors = 1; 1877 md->disk->flags |= GENHD_FL_NO_PART; 1878 md->disk->fops = &dm_blk_dops; 1879 md->disk->queue = md->queue; 1880 md->disk->private_data = md; 1881 sprintf(md->disk->disk_name, "dm-%d", minor); 1882 1883 if (IS_ENABLED(CONFIG_FS_DAX)) { 1884 md->dax_dev = alloc_dax(md, &dm_dax_ops); 1885 if (IS_ERR(md->dax_dev)) { 1886 md->dax_dev = NULL; 1887 goto bad; 1888 } 1889 set_dax_nocache(md->dax_dev); 1890 set_dax_nomc(md->dax_dev); 1891 if (dax_add_host(md->dax_dev, md->disk)) 1892 goto bad; 1893 } 1894 1895 format_dev_t(md->name, MKDEV(_major, minor)); 1896 1897 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); 1898 if (!md->wq) 1899 goto bad; 1900 1901 md->pending_io = alloc_percpu(unsigned long); 1902 if (!md->pending_io) 1903 goto bad; 1904 1905 dm_stats_init(&md->stats); 1906 1907 /* Populate the mapping, nobody knows we exist yet */ 1908 spin_lock(&_minor_lock); 1909 old_md = idr_replace(&_minor_idr, md, minor); 1910 spin_unlock(&_minor_lock); 1911 1912 BUG_ON(old_md != MINOR_ALLOCED); 1913 1914 return md; 1915 1916 bad: 1917 cleanup_mapped_device(md); 1918 bad_io_barrier: 1919 free_minor(minor); 1920 bad_minor: 1921 module_put(THIS_MODULE); 1922 bad_module_get: 1923 kvfree(md); 1924 return NULL; 1925 } 1926 1927 static void unlock_fs(struct mapped_device *md); 1928 1929 static void free_dev(struct mapped_device *md) 1930 { 1931 int minor = MINOR(disk_devt(md->disk)); 1932 1933 unlock_fs(md); 1934 1935 cleanup_mapped_device(md); 1936 1937 free_table_devices(&md->table_devices); 1938 dm_stats_cleanup(&md->stats); 1939 free_minor(minor); 1940 1941 module_put(THIS_MODULE); 1942 kvfree(md); 
1943 } 1944 1945 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1946 { 1947 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1948 int ret = 0; 1949 1950 if (dm_table_bio_based(t)) { 1951 /* 1952 * The md may already have mempools that need changing. 1953 * If so, reload bioset because front_pad may have changed 1954 * because a different table was loaded. 1955 */ 1956 bioset_exit(&md->bs); 1957 bioset_exit(&md->io_bs); 1958 1959 } else if (bioset_initialized(&md->bs)) { 1960 /* 1961 * There's no need to reload with request-based dm 1962 * because the size of front_pad doesn't change. 1963 * Note for future: If you are to reload bioset, 1964 * prep-ed requests in the queue may refer 1965 * to bio from the old bioset, so you must walk 1966 * through the queue to unprep. 1967 */ 1968 goto out; 1969 } 1970 1971 BUG_ON(!p || 1972 bioset_initialized(&md->bs) || 1973 bioset_initialized(&md->io_bs)); 1974 1975 ret = bioset_init_from_src(&md->bs, &p->bs); 1976 if (ret) 1977 goto out; 1978 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 1979 if (ret) 1980 bioset_exit(&md->bs); 1981 out: 1982 /* mempool bind completed, no longer need any mempools in the table */ 1983 dm_table_free_md_mempools(t); 1984 return ret; 1985 } 1986 1987 /* 1988 * Bind a table to the device. 1989 */ 1990 static void event_callback(void *context) 1991 { 1992 unsigned long flags; 1993 LIST_HEAD(uevents); 1994 struct mapped_device *md = (struct mapped_device *) context; 1995 1996 spin_lock_irqsave(&md->uevent_lock, flags); 1997 list_splice_init(&md->uevent_list, &uevents); 1998 spin_unlock_irqrestore(&md->uevent_lock, flags); 1999 2000 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2001 2002 atomic_inc(&md->event_nr); 2003 wake_up(&md->eventq); 2004 dm_issue_global_event(); 2005 } 2006 2007 /* 2008 * Returns old map, which caller must destroy. 2009 */ 2010 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2011 struct queue_limits *limits) 2012 { 2013 struct dm_table *old_map; 2014 sector_t size; 2015 int ret; 2016 2017 lockdep_assert_held(&md->suspend_lock); 2018 2019 size = dm_table_get_size(t); 2020 2021 /* 2022 * Wipe any geometry if the size of the table changed. 2023 */ 2024 if (size != dm_get_size(md)) 2025 memset(&md->geometry, 0, sizeof(md->geometry)); 2026 2027 if (!get_capacity(md->disk)) 2028 set_capacity(md->disk, size); 2029 else 2030 set_capacity_and_notify(md->disk, size); 2031 2032 dm_table_event_callback(t, event_callback, md); 2033 2034 if (dm_table_request_based(t)) { 2035 /* 2036 * Leverage the fact that request-based DM targets are 2037 * immutable singletons - used to optimize dm_mq_queue_rq. 2038 */ 2039 md->immutable_target = dm_table_get_immutable_target(t); 2040 } 2041 2042 ret = __bind_mempools(md, t); 2043 if (ret) { 2044 old_map = ERR_PTR(ret); 2045 goto out; 2046 } 2047 2048 ret = dm_table_set_restrictions(t, md->queue, limits); 2049 if (ret) { 2050 old_map = ERR_PTR(ret); 2051 goto out; 2052 } 2053 2054 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2055 rcu_assign_pointer(md->map, (void *)t); 2056 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2057 2058 if (old_map) 2059 dm_sync_table(md); 2060 out: 2061 return old_map; 2062 } 2063 2064 /* 2065 * Returns unbound table for the caller to free. 
2066 */ 2067 static struct dm_table *__unbind(struct mapped_device *md) 2068 { 2069 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2070 2071 if (!map) 2072 return NULL; 2073 2074 dm_table_event_callback(map, NULL, NULL); 2075 RCU_INIT_POINTER(md->map, NULL); 2076 dm_sync_table(md); 2077 2078 return map; 2079 } 2080 2081 /* 2082 * Constructor for a new device. 2083 */ 2084 int dm_create(int minor, struct mapped_device **result) 2085 { 2086 struct mapped_device *md; 2087 2088 md = alloc_dev(minor); 2089 if (!md) 2090 return -ENXIO; 2091 2092 dm_ima_reset_data(md); 2093 2094 *result = md; 2095 return 0; 2096 } 2097 2098 /* 2099 * Functions to manage md->type. 2100 * All are required to hold md->type_lock. 2101 */ 2102 void dm_lock_md_type(struct mapped_device *md) 2103 { 2104 mutex_lock(&md->type_lock); 2105 } 2106 2107 void dm_unlock_md_type(struct mapped_device *md) 2108 { 2109 mutex_unlock(&md->type_lock); 2110 } 2111 2112 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2113 { 2114 BUG_ON(!mutex_is_locked(&md->type_lock)); 2115 md->type = type; 2116 } 2117 2118 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2119 { 2120 return md->type; 2121 } 2122 2123 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2124 { 2125 return md->immutable_target_type; 2126 } 2127 2128 /* 2129 * The queue_limits are only valid as long as you have a reference 2130 * count on 'md'. 2131 */ 2132 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2133 { 2134 BUG_ON(!atomic_read(&md->holders)); 2135 return &md->queue->limits; 2136 } 2137 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2138 2139 /* 2140 * Setup the DM device's queue based on md's type 2141 */ 2142 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2143 { 2144 enum dm_queue_mode type = dm_table_get_type(t); 2145 struct queue_limits limits; 2146 int r; 2147 2148 switch (type) { 2149 case DM_TYPE_REQUEST_BASED: 2150 md->disk->fops = &dm_rq_blk_dops; 2151 r = dm_mq_init_request_queue(md, t); 2152 if (r) { 2153 DMERR("Cannot initialize queue for request-based dm mapped device"); 2154 return r; 2155 } 2156 break; 2157 case DM_TYPE_BIO_BASED: 2158 case DM_TYPE_DAX_BIO_BASED: 2159 break; 2160 case DM_TYPE_NONE: 2161 WARN_ON_ONCE(true); 2162 break; 2163 } 2164 2165 r = dm_calculate_queue_limits(t, &limits); 2166 if (r) { 2167 DMERR("Cannot calculate initial queue limits"); 2168 return r; 2169 } 2170 r = dm_table_set_restrictions(t, md->queue, &limits); 2171 if (r) 2172 return r; 2173 2174 r = add_disk(md->disk); 2175 if (r) 2176 return r; 2177 2178 r = dm_sysfs_init(md); 2179 if (r) { 2180 del_gendisk(md->disk); 2181 return r; 2182 } 2183 md->type = type; 2184 return 0; 2185 } 2186 2187 struct mapped_device *dm_get_md(dev_t dev) 2188 { 2189 struct mapped_device *md; 2190 unsigned minor = MINOR(dev); 2191 2192 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2193 return NULL; 2194 2195 spin_lock(&_minor_lock); 2196 2197 md = idr_find(&_minor_idr, minor); 2198 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2199 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2200 md = NULL; 2201 goto out; 2202 } 2203 dm_get(md); 2204 out: 2205 spin_unlock(&_minor_lock); 2206 2207 return md; 2208 } 2209 EXPORT_SYMBOL_GPL(dm_get_md); 2210 2211 void *dm_get_mdptr(struct mapped_device *md) 2212 { 2213 return md->interface_ptr; 2214 } 2215 2216 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2217 { 2218 md->interface_ptr = ptr; 
2219 }
2220
2221 void dm_get(struct mapped_device *md)
2222 {
2223 	atomic_inc(&md->holders);
2224 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2225 }
2226
2227 int dm_hold(struct mapped_device *md)
2228 {
2229 	spin_lock(&_minor_lock);
2230 	if (test_bit(DMF_FREEING, &md->flags)) {
2231 		spin_unlock(&_minor_lock);
2232 		return -EBUSY;
2233 	}
2234 	dm_get(md);
2235 	spin_unlock(&_minor_lock);
2236 	return 0;
2237 }
2238 EXPORT_SYMBOL_GPL(dm_hold);
2239
2240 const char *dm_device_name(struct mapped_device *md)
2241 {
2242 	return md->name;
2243 }
2244 EXPORT_SYMBOL_GPL(dm_device_name);
2245
2246 static void __dm_destroy(struct mapped_device *md, bool wait)
2247 {
2248 	struct dm_table *map;
2249 	int srcu_idx;
2250
2251 	might_sleep();
2252
2253 	spin_lock(&_minor_lock);
2254 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2255 	set_bit(DMF_FREEING, &md->flags);
2256 	spin_unlock(&_minor_lock);
2257
2258 	blk_mark_disk_dead(md->disk);
2259
2260 	/*
2261 	 * Take suspend_lock so that presuspend and postsuspend methods
2262 	 * do not race with internal suspend.
2263 	 */
2264 	mutex_lock(&md->suspend_lock);
2265 	map = dm_get_live_table(md, &srcu_idx);
2266 	if (!dm_suspended_md(md)) {
2267 		dm_table_presuspend_targets(map);
2268 		set_bit(DMF_SUSPENDED, &md->flags);
2269 		set_bit(DMF_POST_SUSPENDING, &md->flags);
2270 		dm_table_postsuspend_targets(map);
2271 	}
2272 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2273 	dm_put_live_table(md, srcu_idx);
2274 	mutex_unlock(&md->suspend_lock);
2275
2276 	/*
2277 	 * Rare, but there may be I/O requests still going to complete,
2278 	 * for example. Wait for all references to disappear.
2279 	 * No one should increment the reference count of the mapped_device,
2280 	 * after the mapped_device state becomes DMF_FREEING.
2281 	 */
2282 	if (wait)
2283 		while (atomic_read(&md->holders))
2284 			msleep(1);
2285 	else if (atomic_read(&md->holders))
2286 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2287 		       dm_device_name(md), atomic_read(&md->holders));
2288
2289 	dm_table_destroy(__unbind(md));
2290 	free_dev(md);
2291 }
2292
2293 void dm_destroy(struct mapped_device *md)
2294 {
2295 	__dm_destroy(md, true);
2296 }
2297
2298 void dm_destroy_immediate(struct mapped_device *md)
2299 {
2300 	__dm_destroy(md, false);
2301 }
2302
2303 void dm_put(struct mapped_device *md)
2304 {
2305 	atomic_dec(&md->holders);
2306 }
2307 EXPORT_SYMBOL_GPL(dm_put);
2308
2309 static bool dm_in_flight_bios(struct mapped_device *md)
2310 {
2311 	int cpu;
2312 	unsigned long sum = 0;
2313
2314 	for_each_possible_cpu(cpu)
2315 		sum += *per_cpu_ptr(md->pending_io, cpu);
2316
2317 	return sum != 0;
2318 }
2319
2320 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2321 {
2322 	int r = 0;
2323 	DEFINE_WAIT(wait);
2324
2325 	while (true) {
2326 		prepare_to_wait(&md->wait, &wait, task_state);
2327
2328 		if (!dm_in_flight_bios(md))
2329 			break;
2330
2331 		if (signal_pending_state(task_state, current)) {
2332 			r = -EINTR;
2333 			break;
2334 		}
2335
2336 		io_schedule();
2337 	}
2338 	finish_wait(&md->wait, &wait);
2339
2340 	smp_rmb();
2341
2342 	return r;
2343 }
2344
2345 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2346 {
2347 	int r = 0;
2348
2349 	if (!queue_is_mq(md->queue))
2350 		return dm_wait_for_bios_completion(md, task_state);
2351
2352 	while (true) {
2353 		if (!blk_mq_queue_inflight(md->queue))
2354 			break;
2355
2356 		if (signal_pending_state(task_state, current)) {
2357 			r = -EINTR;
2358 			break;
2359 		}
2360
2361 		msleep(5);
2362 	}
2363
2364 	return r;
2365 }
2366
2367 /*
2368  * Process the deferred bios
2369  */
2370 static void dm_wq_work(struct work_struct *work)
2371 {
2372 	struct mapped_device *md = container_of(work, struct mapped_device, work);
2373 	struct bio *bio;
2374
2375 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2376 		spin_lock_irq(&md->deferred_lock);
2377 		bio = bio_list_pop(&md->deferred);
2378 		spin_unlock_irq(&md->deferred_lock);
2379
2380 		if (!bio)
2381 			break;
2382
2383 		submit_bio_noacct(bio);
2384 	}
2385 }
2386
2387 static void dm_queue_flush(struct mapped_device *md)
2388 {
2389 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2390 	smp_mb__after_atomic();
2391 	queue_work(md->wq, &md->work);
2392 }
2393
2394 /*
2395  * Swap in a new table, returning the old one for the caller to destroy.
2396  */
2397 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2398 {
2399 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2400 	struct queue_limits limits;
2401 	int r;
2402
2403 	mutex_lock(&md->suspend_lock);
2404
2405 	/* device must be suspended */
2406 	if (!dm_suspended_md(md))
2407 		goto out;
2408
2409 	/*
2410 	 * If the new table has no data devices, retain the existing limits.
2411 	 * This helps multipath with queue_if_no_path if all paths disappear,
2412 	 * then new I/O is queued based on these limits, and then some paths
2413 	 * reappear.
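	 *
	 * For illustration only, not part of the original source: the
	 * caller-side contract of dm_swap_table() is to suspend first, then
	 * swap, then destroy whatever table comes back, roughly:
	 *
	 *	old = dm_swap_table(md, new_table);
	 *	if (IS_ERR(old))
	 *		return PTR_ERR(old);	(-EINVAL if md was not suspended)
	 *	if (old)
	 *		dm_table_destroy(old);
	 *
	 * where "new_table" stands for a table built elsewhere; the real
	 * table-load sequence lives in the DM ioctl code.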
2414 */ 2415 if (dm_table_has_no_data_devices(table)) { 2416 live_map = dm_get_live_table_fast(md); 2417 if (live_map) 2418 limits = md->queue->limits; 2419 dm_put_live_table_fast(md); 2420 } 2421 2422 if (!live_map) { 2423 r = dm_calculate_queue_limits(table, &limits); 2424 if (r) { 2425 map = ERR_PTR(r); 2426 goto out; 2427 } 2428 } 2429 2430 map = __bind(md, table, &limits); 2431 dm_issue_global_event(); 2432 2433 out: 2434 mutex_unlock(&md->suspend_lock); 2435 return map; 2436 } 2437 2438 /* 2439 * Functions to lock and unlock any filesystem running on the 2440 * device. 2441 */ 2442 static int lock_fs(struct mapped_device *md) 2443 { 2444 int r; 2445 2446 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2447 2448 r = freeze_bdev(md->disk->part0); 2449 if (!r) 2450 set_bit(DMF_FROZEN, &md->flags); 2451 return r; 2452 } 2453 2454 static void unlock_fs(struct mapped_device *md) 2455 { 2456 if (!test_bit(DMF_FROZEN, &md->flags)) 2457 return; 2458 thaw_bdev(md->disk->part0); 2459 clear_bit(DMF_FROZEN, &md->flags); 2460 } 2461 2462 /* 2463 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2464 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2465 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2466 * 2467 * If __dm_suspend returns 0, the device is completely quiescent 2468 * now. There is no request-processing activity. All new requests 2469 * are being added to md->deferred list. 2470 */ 2471 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2472 unsigned suspend_flags, unsigned int task_state, 2473 int dmf_suspended_flag) 2474 { 2475 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2476 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2477 int r; 2478 2479 lockdep_assert_held(&md->suspend_lock); 2480 2481 /* 2482 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2483 * This flag is cleared before dm_suspend returns. 2484 */ 2485 if (noflush) 2486 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2487 else 2488 DMDEBUG("%s: suspending with flush", dm_device_name(md)); 2489 2490 /* 2491 * This gets reverted if there's an error later and the targets 2492 * provide the .presuspend_undo hook. 2493 */ 2494 dm_table_presuspend_targets(map); 2495 2496 /* 2497 * Flush I/O to the device. 2498 * Any I/O submitted after lock_fs() may not be flushed. 2499 * noflush takes precedence over do_lockfs. 2500 * (lock_fs() flushes I/Os and waits for them to complete.) 2501 */ 2502 if (!noflush && do_lockfs) { 2503 r = lock_fs(md); 2504 if (r) { 2505 dm_table_presuspend_undo_targets(map); 2506 return r; 2507 } 2508 } 2509 2510 /* 2511 * Here we must make sure that no processes are submitting requests 2512 * to target drivers i.e. no one may be executing 2513 * dm_split_and_process_bio from dm_submit_bio. 2514 * 2515 * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 2516 * we take the write lock. To prevent any process from reentering 2517 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread 2518 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 2519 * flush_workqueue(md->wq). 2520 */ 2521 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2522 if (map) 2523 synchronize_srcu(&md->io_barrier); 2524 2525 /* 2526 * Stop md->queue before flushing md->wq in case request-based 2527 * dm defers requests to md->wq from md->queue. 
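	 *
	 * (Illustration, not part of the original source: the
	 * DMF_NOFLUSH_SUSPENDING state set earlier in this function is what
	 * targets probe via dm_noflush_suspending().  A multipath-style ->map
	 * hook cooperates with a noflush suspend roughly as follows, where
	 * "no_usable_path" is a stand-in for target-specific state:
	 *
	 *	if (no_usable_path) {
	 *		if (dm_noflush_suspending(ti))
	 *			return DM_MAPIO_REQUEUE;
	 *		return DM_MAPIO_KILL;
	 *	}
	 *
	 * Sketch only; the real policy belongs to the target.)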
2528 */ 2529 if (dm_request_based(md)) 2530 dm_stop_queue(md->queue); 2531 2532 flush_workqueue(md->wq); 2533 2534 /* 2535 * At this point no more requests are entering target request routines. 2536 * We call dm_wait_for_completion to wait for all existing requests 2537 * to finish. 2538 */ 2539 r = dm_wait_for_completion(md, task_state); 2540 if (!r) 2541 set_bit(dmf_suspended_flag, &md->flags); 2542 2543 if (noflush) 2544 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2545 if (map) 2546 synchronize_srcu(&md->io_barrier); 2547 2548 /* were we interrupted ? */ 2549 if (r < 0) { 2550 dm_queue_flush(md); 2551 2552 if (dm_request_based(md)) 2553 dm_start_queue(md->queue); 2554 2555 unlock_fs(md); 2556 dm_table_presuspend_undo_targets(map); 2557 /* pushback list is already flushed, so skip flush */ 2558 } 2559 2560 return r; 2561 } 2562 2563 /* 2564 * We need to be able to change a mapping table under a mounted 2565 * filesystem. For example we might want to move some data in 2566 * the background. Before the table can be swapped with 2567 * dm_bind_table, dm_suspend must be called to flush any in 2568 * flight bios and ensure that any further io gets deferred. 2569 */ 2570 /* 2571 * Suspend mechanism in request-based dm. 2572 * 2573 * 1. Flush all I/Os by lock_fs() if needed. 2574 * 2. Stop dispatching any I/O by stopping the request_queue. 2575 * 3. Wait for all in-flight I/Os to be completed or requeued. 2576 * 2577 * To abort suspend, start the request_queue. 2578 */ 2579 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2580 { 2581 struct dm_table *map = NULL; 2582 int r = 0; 2583 2584 retry: 2585 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2586 2587 if (dm_suspended_md(md)) { 2588 r = -EINVAL; 2589 goto out_unlock; 2590 } 2591 2592 if (dm_suspended_internally_md(md)) { 2593 /* already internally suspended, wait for internal resume */ 2594 mutex_unlock(&md->suspend_lock); 2595 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2596 if (r) 2597 return r; 2598 goto retry; 2599 } 2600 2601 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2602 2603 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2604 if (r) 2605 goto out_unlock; 2606 2607 set_bit(DMF_POST_SUSPENDING, &md->flags); 2608 dm_table_postsuspend_targets(map); 2609 clear_bit(DMF_POST_SUSPENDING, &md->flags); 2610 2611 out_unlock: 2612 mutex_unlock(&md->suspend_lock); 2613 return r; 2614 } 2615 2616 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 2617 { 2618 if (map) { 2619 int r = dm_table_resume_targets(map); 2620 if (r) 2621 return r; 2622 } 2623 2624 dm_queue_flush(md); 2625 2626 /* 2627 * Flushing deferred I/Os must be done after targets are resumed 2628 * so that mapping of targets can work correctly. 2629 * Request-based dm is queueing the deferred I/Os in its request_queue. 
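	 *
	 * For orientation only, not part of the original source: the
	 * userspace-driven cycle that ends here is roughly the following,
	 * with error handling trimmed:
	 *
	 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	 *	... load and bind the new table ...
	 *	r = dm_resume(md);
	 *
	 * The exact sequence, including the noflush variant, is driven from
	 * the DM ioctl code.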
2630 */ 2631 if (dm_request_based(md)) 2632 dm_start_queue(md->queue); 2633 2634 unlock_fs(md); 2635 2636 return 0; 2637 } 2638 2639 int dm_resume(struct mapped_device *md) 2640 { 2641 int r; 2642 struct dm_table *map = NULL; 2643 2644 retry: 2645 r = -EINVAL; 2646 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2647 2648 if (!dm_suspended_md(md)) 2649 goto out; 2650 2651 if (dm_suspended_internally_md(md)) { 2652 /* already internally suspended, wait for internal resume */ 2653 mutex_unlock(&md->suspend_lock); 2654 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2655 if (r) 2656 return r; 2657 goto retry; 2658 } 2659 2660 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2661 if (!map || !dm_table_get_size(map)) 2662 goto out; 2663 2664 r = __dm_resume(md, map); 2665 if (r) 2666 goto out; 2667 2668 clear_bit(DMF_SUSPENDED, &md->flags); 2669 out: 2670 mutex_unlock(&md->suspend_lock); 2671 2672 return r; 2673 } 2674 2675 /* 2676 * Internal suspend/resume works like userspace-driven suspend. It waits 2677 * until all bios finish and prevents issuing new bios to the target drivers. 2678 * It may be used only from the kernel. 2679 */ 2680 2681 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2682 { 2683 struct dm_table *map = NULL; 2684 2685 lockdep_assert_held(&md->suspend_lock); 2686 2687 if (md->internal_suspend_count++) 2688 return; /* nested internal suspend */ 2689 2690 if (dm_suspended_md(md)) { 2691 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2692 return; /* nest suspend */ 2693 } 2694 2695 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2696 2697 /* 2698 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2699 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2700 * would require changing .presuspend to return an error -- avoid this 2701 * until there is a need for more elaborate variants of internal suspend. 
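 *
 * (Illustration, not part of the original source: a stacked target that
 * needs its device quiesced around a metadata operation uses the exported
 * wrappers below roughly like this, sketch only:
 *
 *	struct mapped_device *md = dm_table_get_md(ti->table);
 *
 *	dm_internal_suspend_noflush(md);
 *	... commit or reconfigure metadata ...
 *	dm_internal_resume(md);
 *
 * This mirrors how thin-pool-style targets drive internal suspend; the
 * actual call sites live in those targets.)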
2702 */ 2703 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2704 DMF_SUSPENDED_INTERNALLY); 2705 2706 set_bit(DMF_POST_SUSPENDING, &md->flags); 2707 dm_table_postsuspend_targets(map); 2708 clear_bit(DMF_POST_SUSPENDING, &md->flags); 2709 } 2710 2711 static void __dm_internal_resume(struct mapped_device *md) 2712 { 2713 BUG_ON(!md->internal_suspend_count); 2714 2715 if (--md->internal_suspend_count) 2716 return; /* resume from nested internal suspend */ 2717 2718 if (dm_suspended_md(md)) 2719 goto done; /* resume from nested suspend */ 2720 2721 /* 2722 * NOTE: existing callers don't need to call dm_table_resume_targets 2723 * (which may fail -- so best to avoid it for now by passing NULL map) 2724 */ 2725 (void) __dm_resume(md, NULL); 2726 2727 done: 2728 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2729 smp_mb__after_atomic(); 2730 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2731 } 2732 2733 void dm_internal_suspend_noflush(struct mapped_device *md) 2734 { 2735 mutex_lock(&md->suspend_lock); 2736 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2737 mutex_unlock(&md->suspend_lock); 2738 } 2739 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2740 2741 void dm_internal_resume(struct mapped_device *md) 2742 { 2743 mutex_lock(&md->suspend_lock); 2744 __dm_internal_resume(md); 2745 mutex_unlock(&md->suspend_lock); 2746 } 2747 EXPORT_SYMBOL_GPL(dm_internal_resume); 2748 2749 /* 2750 * Fast variants of internal suspend/resume hold md->suspend_lock, 2751 * which prevents interaction with userspace-driven suspend. 2752 */ 2753 2754 void dm_internal_suspend_fast(struct mapped_device *md) 2755 { 2756 mutex_lock(&md->suspend_lock); 2757 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2758 return; 2759 2760 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2761 synchronize_srcu(&md->io_barrier); 2762 flush_workqueue(md->wq); 2763 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2764 } 2765 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2766 2767 void dm_internal_resume_fast(struct mapped_device *md) 2768 { 2769 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2770 goto done; 2771 2772 dm_queue_flush(md); 2773 2774 done: 2775 mutex_unlock(&md->suspend_lock); 2776 } 2777 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2778 2779 /*----------------------------------------------------------------- 2780 * Event notification. 
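 *
 * Not part of the original source, an illustrative consumer of the hooks
 * below: the wait-for-event path samples the counter, reports it, then
 * blocks until it moves (sketch only, error handling trimmed):
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *	... hand "nr" back to the caller ...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;	(interrupted by a signal)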
2781  *---------------------------------------------------------------*/
2782 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2783 		       unsigned cookie)
2784 {
2785 	int r;
2786 	unsigned noio_flag;
2787 	char udev_cookie[DM_COOKIE_LENGTH];
2788 	char *envp[] = { udev_cookie, NULL };
2789
2790 	noio_flag = memalloc_noio_save();
2791
2792 	if (!cookie)
2793 		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2794 	else {
2795 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2796 			 DM_COOKIE_ENV_VAR_NAME, cookie);
2797 		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2798 				       action, envp);
2799 	}
2800
2801 	memalloc_noio_restore(noio_flag);
2802
2803 	return r;
2804 }
2805
2806 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2807 {
2808 	return atomic_add_return(1, &md->uevent_seq);
2809 }
2810
2811 uint32_t dm_get_event_nr(struct mapped_device *md)
2812 {
2813 	return atomic_read(&md->event_nr);
2814 }
2815
2816 int dm_wait_event(struct mapped_device *md, int event_nr)
2817 {
2818 	return wait_event_interruptible(md->eventq,
2819 			(event_nr != atomic_read(&md->event_nr)));
2820 }
2821
2822 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2823 {
2824 	unsigned long flags;
2825
2826 	spin_lock_irqsave(&md->uevent_lock, flags);
2827 	list_add(elist, &md->uevent_list);
2828 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2829 }
2830
2831 /*
2832  * The gendisk is only valid as long as you have a reference
2833  * count on 'md'.
2834  */
2835 struct gendisk *dm_disk(struct mapped_device *md)
2836 {
2837 	return md->disk;
2838 }
2839 EXPORT_SYMBOL_GPL(dm_disk);
2840
2841 struct kobject *dm_kobject(struct mapped_device *md)
2842 {
2843 	return &md->kobj_holder.kobj;
2844 }
2845
2846 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2847 {
2848 	struct mapped_device *md;
2849
2850 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2851
2852 	spin_lock(&_minor_lock);
2853 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2854 		md = NULL;
2855 		goto out;
2856 	}
2857 	dm_get(md);
2858 out:
2859 	spin_unlock(&_minor_lock);
2860
2861 	return md;
2862 }
2863
2864 int dm_suspended_md(struct mapped_device *md)
2865 {
2866 	return test_bit(DMF_SUSPENDED, &md->flags);
2867 }
2868
2869 static int dm_post_suspending_md(struct mapped_device *md)
2870 {
2871 	return test_bit(DMF_POST_SUSPENDING, &md->flags);
2872 }
2873
2874 int dm_suspended_internally_md(struct mapped_device *md)
2875 {
2876 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2877 }
2878
2879 int dm_test_deferred_remove_flag(struct mapped_device *md)
2880 {
2881 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2882 }
2883
2884 int dm_suspended(struct dm_target *ti)
2885 {
2886 	return dm_suspended_md(ti->table->md);
2887 }
2888 EXPORT_SYMBOL_GPL(dm_suspended);
2889
2890 int dm_post_suspending(struct dm_target *ti)
2891 {
2892 	return dm_post_suspending_md(ti->table->md);
2893 }
2894 EXPORT_SYMBOL_GPL(dm_post_suspending);
2895
2896 int dm_noflush_suspending(struct dm_target *ti)
2897 {
2898 	return __noflush_suspending(ti->table->md);
2899 }
2900 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2901
2902 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2903 					    unsigned integrity, unsigned per_io_data_size,
2904 					    unsigned min_pool_size)
2905 {
2906 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2907 	unsigned int pool_size = 0;
2908 	unsigned int front_pad, io_front_pad;
2909 	int ret;
2910
2911 	if (!pools)
2912 		return NULL;
2913
2914 	switch (type) {
2915 	case DM_TYPE_BIO_BASED:
2916 	case DM_TYPE_DAX_BIO_BASED:
2917 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
2918 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
2919 		io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
2920 		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
2921 		if (ret)
2922 			goto out;
2923 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
2924 			goto out;
2925 		break;
2926 	case DM_TYPE_REQUEST_BASED:
2927 		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
2928 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2929 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
2930 		break;
2931 	default:
2932 		BUG();
2933 	}
2934
2935 	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
2936 	if (ret)
2937 		goto out;
2938
2939 	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
2940 		goto out;
2941
2942 	return pools;
2943
2944 out:
2945 	dm_free_md_mempools(pools);
2946
2947 	return NULL;
2948 }
2949
2950 void dm_free_md_mempools(struct dm_md_mempools *pools)
2951 {
2952 	if (!pools)
2953 		return;
2954
2955 	bioset_exit(&pools->bs);
2956 	bioset_exit(&pools->io_bs);
2957
2958 	kfree(pools);
2959 }
2960
2961 struct dm_pr {
2962 	u64 old_key;
2963 	u64 new_key;
2964 	u32 flags;
2965 	bool fail_early;
2966 };
2967
2968 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
2969 		      void *data)
2970 {
2971 	struct mapped_device *md = bdev->bd_disk->private_data;
2972 	struct dm_table *table;
2973 	struct dm_target *ti;
2974 	int ret = -ENOTTY, srcu_idx;
2975
2976 	table = dm_get_live_table(md, &srcu_idx);
2977 	if (!table || !dm_table_get_size(table))
2978 		goto out;
2979
2980 	/* We only support devices that have a single target */
2981 	if (dm_table_get_num_targets(table) != 1)
2982 		goto out;
2983 	ti = dm_table_get_target(table, 0);
2984
2985 	ret = -EINVAL;
2986 	if (!ti->type->iterate_devices)
2987 		goto out;
2988
2989 	ret = ti->type->iterate_devices(ti, fn, data);
2990 out:
2991 	dm_put_live_table(md, srcu_idx);
2992 	return ret;
2993 }
2994
2995 /*
2996  * For register / unregister we need to manually call out to every path.
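 *
 * (Illustrative only, not part of the original source: these handlers are
 * reached through the generic persistent-reservation ioctls issued against
 * the mapped device node.  A userspace caller looks roughly like the
 * following, where "/dev/dm-0" is just an example path:
 *
 *	struct pr_registration reg = {
 *		.new_key = 0x1234abcd,
 *	};
 *	int fd = open("/dev/dm-0", O_RDWR);
 *
 *	ioctl(fd, IOC_PR_REGISTER, &reg);
 *
 * The register path then fans out to every underlying device as described
 * above.)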
2997  */
2998 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
2999 			    sector_t start, sector_t len, void *data)
3000 {
3001 	struct dm_pr *pr = data;
3002 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3003
3004 	if (!ops || !ops->pr_register)
3005 		return -EOPNOTSUPP;
3006 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3007 }
3008
3009 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3010 			  u32 flags)
3011 {
3012 	struct dm_pr pr = {
3013 		.old_key = old_key,
3014 		.new_key = new_key,
3015 		.flags = flags,
3016 		.fail_early = true,
3017 	};
3018 	int ret;
3019
3020 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3021 	if (ret && new_key) {
3022 		/* unregister all paths if we failed to register any path */
3023 		pr.old_key = new_key;
3024 		pr.new_key = 0;
3025 		pr.flags = 0;
3026 		pr.fail_early = false;
3027 		dm_call_pr(bdev, __dm_pr_register, &pr);
3028 	}
3029
3030 	return ret;
3031 }
3032
3033 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3034 			 u32 flags)
3035 {
3036 	struct mapped_device *md = bdev->bd_disk->private_data;
3037 	const struct pr_ops *ops;
3038 	int r, srcu_idx;
3039
3040 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3041 	if (r < 0)
3042 		goto out;
3043
3044 	ops = bdev->bd_disk->fops->pr_ops;
3045 	if (ops && ops->pr_reserve)
3046 		r = ops->pr_reserve(bdev, key, type, flags);
3047 	else
3048 		r = -EOPNOTSUPP;
3049 out:
3050 	dm_unprepare_ioctl(md, srcu_idx);
3051 	return r;
3052 }
3053
3054 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3055 {
3056 	struct mapped_device *md = bdev->bd_disk->private_data;
3057 	const struct pr_ops *ops;
3058 	int r, srcu_idx;
3059
3060 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3061 	if (r < 0)
3062 		goto out;
3063
3064 	ops = bdev->bd_disk->fops->pr_ops;
3065 	if (ops && ops->pr_release)
3066 		r = ops->pr_release(bdev, key, type);
3067 	else
3068 		r = -EOPNOTSUPP;
3069 out:
3070 	dm_unprepare_ioctl(md, srcu_idx);
3071 	return r;
3072 }
3073
3074 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3075 			 enum pr_type type, bool abort)
3076 {
3077 	struct mapped_device *md = bdev->bd_disk->private_data;
3078 	const struct pr_ops *ops;
3079 	int r, srcu_idx;
3080
3081 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3082 	if (r < 0)
3083 		goto out;
3084
3085 	ops = bdev->bd_disk->fops->pr_ops;
3086 	if (ops && ops->pr_preempt)
3087 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3088 	else
3089 		r = -EOPNOTSUPP;
3090 out:
3091 	dm_unprepare_ioctl(md, srcu_idx);
3092 	return r;
3093 }
3094
3095 static int dm_pr_clear(struct block_device *bdev, u64 key)
3096 {
3097 	struct mapped_device *md = bdev->bd_disk->private_data;
3098 	const struct pr_ops *ops;
3099 	int r, srcu_idx;
3100
3101 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3102 	if (r < 0)
3103 		goto out;
3104
3105 	ops = bdev->bd_disk->fops->pr_ops;
3106 	if (ops && ops->pr_clear)
3107 		r = ops->pr_clear(bdev, key);
3108 	else
3109 		r = -EOPNOTSUPP;
3110 out:
3111 	dm_unprepare_ioctl(md, srcu_idx);
3112 	return r;
3113 }
3114
3115 static const struct pr_ops dm_pr_ops = {
3116 	.pr_register = dm_pr_register,
3117 	.pr_reserve = dm_pr_reserve,
3118 	.pr_release = dm_pr_release,
3119 	.pr_preempt = dm_pr_preempt,
3120 	.pr_clear = dm_pr_clear,
3121 };
3122
3123 static const struct block_device_operations dm_blk_dops = {
3124 	.submit_bio = dm_submit_bio,
3125 	.poll_bio = dm_poll_bio,
3126 	.open = dm_blk_open,
3127 	.release = dm_blk_close,
3128 	.ioctl = dm_blk_ioctl,
3129 	.getgeo = dm_blk_getgeo,
3130 	.report_zones = dm_blk_report_zones,
3131 	.pr_ops = &dm_pr_ops,
3132 	.owner = THIS_MODULE
3133 };
3134
3135 static const struct block_device_operations dm_rq_blk_dops = {
3136 	.open = dm_blk_open,
3137 	.release = dm_blk_close,
3138 	.ioctl = dm_blk_ioctl,
3139 	.getgeo = dm_blk_getgeo,
3140 	.pr_ops = &dm_pr_ops,
3141 	.owner = THIS_MODULE
3142 };
3143
3144 static const struct dax_operations dm_dax_ops = {
3145 	.direct_access = dm_dax_direct_access,
3146 	.zero_page_range = dm_dax_zero_page_range,
3147 };
3148
3149 /*
3150  * module hooks
3151  */
3152 module_init(dm_init);
3153 module_exit(dm_exit);
3154
3155 module_param(major, uint, 0);
3156 MODULE_PARM_DESC(major, "The major number of the device mapper");
3157
3158 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3159 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3160
3161 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3162 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3163
3164 module_param(swap_bios, int, S_IRUGO | S_IWUSR);
3165 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3166
3167 MODULE_DESCRIPTION(DM_NAME " driver");
3168 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3169 MODULE_LICENSE("GPL");
3170
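/*
 * Not part of the original source: a closing illustration of how a target
 * consumes the per-bio front padding that dm_alloc_md_mempools() sizes from
 * per_io_data_size.  In a target's constructor and map methods, with the
 * "my_ctx" name purely hypothetical:
 *
 *	struct my_ctx { ... };
 *
 *	ti->per_io_data_size = sizeof(struct my_ctx);	(in the constructor)
 *
 *	struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));
 *	(in ->map; the data stays valid for the lifetime of that bio)
 */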